max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
openskill/statistics.py | CalColson/openskill.py | 120 | 9400 | <gh_stars>100-1000
import sys
import scipy.stats
normal = scipy.stats.norm(0, 1)
def phi_major(x):
return normal.cdf(x)
def phi_minor(x):
return normal.pdf(x)
def v(x, t):
xt = x - t
denom = phi_major(xt)
return -xt if (denom < sys.float_info.epsilon) else phi_minor(xt) / denom
def w(x, t):
xt = x - t
denom = phi_major(xt)
if denom < sys.float_info.epsilon:
return 1 if (x < 0) else 0
return v(x, t) * (v(x, t) + xt)
def vt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < 1e-5:
if x < 0:
return -x - t
return -x + t
a = phi_minor(-t - xx) - phi_minor(t - xx)
return (-a if x < 0 else a) / b
def wt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < sys.float_info.epsilon:
return 1.0
return ((t - xx) * phi_minor(t - xx) + (t + xx) * phi_minor(-t - xx)) / b + vt(
x, t
) * vt(x, t)
| import sys
import scipy.stats
normal = scipy.stats.norm(0, 1)
def phi_major(x):
return normal.cdf(x)
def phi_minor(x):
return normal.pdf(x)
def v(x, t):
xt = x - t
denom = phi_major(xt)
return -xt if (denom < sys.float_info.epsilon) else phi_minor(xt) / denom
def w(x, t):
xt = x - t
denom = phi_major(xt)
if denom < sys.float_info.epsilon:
return 1 if (x < 0) else 0
return v(x, t) * (v(x, t) + xt)
def vt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < 1e-5:
if x < 0:
return -x - t
return -x + t
a = phi_minor(-t - xx) - phi_minor(t - xx)
return (-a if x < 0 else a) / b
def wt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < sys.float_info.epsilon:
return 1.0
return ((t - xx) * phi_minor(t - xx) + (t + xx) * phi_minor(-t - xx)) / b + vt(
x, t
) * vt(x, t) | none | 1 | 2.50236 | 3 |
|
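The four helpers in this row are the truncated-normal moment corrections used in TrueSkill/Weng-Lin-style rating updates: `v`/`w` handle a decisive win, `vt`/`wt` handle a draw. A minimal usage sketch, assuming the file is importable as `openskill.statistics` (the path is taken from this row; adjust the import to wherever the module actually lives):

```python
# Hedged sketch: the import path below is assumed from the repo path in this row.
from openskill.statistics import v, w, vt, wt

x, t = 0.5, 0.1            # x = scaled skill difference, t = scaled draw margin
print(v(x, t))             # additive mean correction for a win: pdf(x - t) / cdf(x - t)
print(w(x, t))             # variance shrinkage factor: v * (v + x - t), stays in (0, 1)
print(vt(x, t), wt(x, t))  # the analogous corrections when the outcome is a draw
```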
src/openalea/container/graph.py | revesansparole/oacontainer | 0 | 9401 | # -*- coding: utf-8 -*-
#
# Graph : graph package
#
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
#
# File author(s): <NAME> <<EMAIL>>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# VPlants WebSite : https://gforge.inria.fr/projects/vplants/
#
"""This module provide a simple pure python implementation
for a graph interface
does not implement copy concept
"""
from id_dict import IdDict
class GraphError(Exception):
"""
base class of all graph exceptions
"""
class InvalidEdge(GraphError, KeyError):
"""
exception raised when a wrong edge id is provided
"""
class InvalidVertex(GraphError, KeyError):
"""
exception raised when a wrong vertex id is provided
"""
class Graph(object):
"""Directed graph with multiple links
in this implementation :
- vertices are tuple of edge_in,edge_out
- edges are tuple of source,target
"""
def __init__(self, graph=None, idgenerator="set"):
"""constructor
if graph is not none make a copy of the topological structure of graph
(i.e. don't use the same id)
args:
- graph (Graph): the graph to copy, default=None
- idgenerator (str): type of idgenerator to use, default 'set'
"""
self._vertices = IdDict(idgenerator=idgenerator)
self._edges = IdDict(idgenerator=idgenerator)
if graph is not None:
self.extend(graph)
# ##########################################################
#
# Graph concept
#
# ##########################################################
def source(self, eid):
"""Retrieve the source vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][0]
except KeyError:
raise InvalidEdge(eid)
def target(self, eid):
"""Retrieve the target vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][1]
except KeyError:
raise InvalidEdge(eid)
def edge_vertices(self, eid):
"""Retrieve both source and target vertex of an edge
args:
- eid (int): edge id
return:
- (int, int): source id, target id
"""
try:
return self._edges[eid]
except KeyError:
raise InvalidEdge(eid)
def edge(self, source, target):
"""Find the matching edge with same source and same target
return None if no such edge exists
args:
- source (int): source vertex
- target (int): target vertex
return:
- (int): edge id with same source and target
- (None): if search is unsuccessful
"""
if target not in self:
raise InvalidVertex(target)
for eid in self.out_edges(source):
if self.target(eid) == target:
return eid
return None
def __contains__(self, vid):
"""magic alias for `has_vertex`
"""
return self.has_vertex(vid)
def has_vertex(self, vid):
"""test whether a vertex belong to the graph
args:
- vid (int): id of vertex
return:
- (bool)
"""
return vid in self._vertices
def has_edge(self, eid):
"""test whether an edge belong to the graph
args:
- eid (int): id of edge
return:
- (bool)
"""
return eid in self._edges
def is_valid(self):
"""Test the validity of the graph
return:
- (bool)
"""
return True
# ##########################################################
#
# Vertex List Graph Concept
#
# ##########################################################
def vertices(self):
"""Iterator on all vertices
return:
- (iter of int)
"""
return iter(self._vertices)
def __iter__(self):
"""Magic alias for `vertices`
"""
return iter(self._vertices)
def nb_vertices(self):
"""Total number of vertices in the graph
return:
- (int)
"""
return len(self._vertices)
def __len__(self):
"""Magic alias for `nb_vertices`
"""
return self.nb_vertices()
def in_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.source(eid) for eid in self._vertices[vid][0]]
return iter(set(neighbors_list))
def out_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.target(eid) for eid in self._vertices[vid][1]]
return iter(set(neighbors_list))
def neighbors(self, vid):
"""Iterator on all neighbors of vid both in and out
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
neighbors_list = list(self.in_neighbors(vid))
neighbors_list.extend(self.out_neighbors(vid))
return iter(set(neighbors_list))
def nb_in_neighbors(self, vid):
"""Number of in neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.in_neighbors(vid))
return len(neighbors_set)
def nb_out_neighbors(self, vid):
"""Number of out neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.out_neighbors(vid))
return len(neighbors_set)
def nb_neighbors(self, vid):
"""Total number of both in and out neighbors of vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.neighbors(vid))
return len(neighbors_set)
# ##########################################################
#
# Edge List Graph Concept
#
# ##########################################################
def _iter_edges(self, vid):
"""
internal function that perform 'edges' with vid not None
"""
link_in, link_out = self._vertices[vid]
for eid in link_in:
yield eid
for eid in link_out:
yield eid
def edges(self, vid=None):
"""Iterate on all edges connected to a given vertex.
If vid is None (default), iterate on all edges in the graph
args:
- vid (int): vertex holding the edges, default (None)
return:
- (iter of int): iterator on edge ids
"""
if vid is None:
return iter(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return self._iter_edges(vid)
def nb_edges(self, vid=None):
"""Number of edges connected to a given vertex.
If vid is None (default), total number of edges in the graph
args:
- vid (int): vertex holding the edges, default (None)
return:
- (int)
"""
if vid is None:
return len(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0]) + len(self._vertices[vid][1])
def in_edges(self, vid):
"""Iterate on all edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][0]:
yield eid
def out_edges(self, vid):
"""Iterate on all edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][1]:
yield eid
def nb_in_edges(self, vid):
"""Number of edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0])
def nb_out_edges(self, vid):
"""Number of edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][1])
# ##########################################################
#
# Mutable Vertex Graph concept
#
# ##########################################################
def add_vertex(self, vid=None):
"""Add a vertex to the graph.
If vid is not provided create a new vid
args:
- vid (int): id to use. If None (default) will generate a new one
return:
- vid (int): id used for the new vertex
"""
try:
return self._vertices.add((set(), set()), vid)
except KeyError:
raise InvalidVertex(vid)
def remove_vertex(self, vid):
"""Remove a specified vertex of the graph.
Also remove all edge attached to it.
args:
- vid (int): id of vertex to remove
"""
if vid not in self:
raise InvalidVertex(vid)
link_in, link_out = self._vertices[vid]
for edge in list(link_in):
self.remove_edge(edge)
for edge in list(link_out):
self.remove_edge(edge)
del self._vertices[vid]
def clear(self):
"""Remove all vertices and edges
don't change references to objects
"""
self._edges.clear()
self._vertices.clear()
# ##########################################################
#
# Mutable Edge Graph concept
#
# ##########################################################
def add_edge(self, sid, tid, eid=None):
"""Add an edge to the graph.
If eid is not provided generate a new one.
args:
- sid (int): id of source vertex
- tid (int): id of target vertex
- eid (int): id to use. If None (default) will generate a new one
return:
- eid (int): id used for new edge
"""
if sid not in self:
raise InvalidVertex(sid)
if tid not in self:
raise InvalidVertex(tid)
try:
eid = self._edges.add((sid, tid), eid)
except KeyError:
raise InvalidEdge(eid)
self._vertices[sid][1].add(eid)
self._vertices[tid][0].add(eid)
return eid
def remove_edge(self, eid):
"""Remove a specified edge from the graph.
args:
- eid (int): id of edge to remove
"""
if not self.has_edge(eid):
raise InvalidEdge(eid)
sid, tid = self._edges[eid]
self._vertices[sid][1].remove(eid)
self._vertices[tid][0].remove(eid)
del self._edges[eid]
def clear_edges(self):
"""Remove all the edges of the graph
don't change references to objects
"""
self._edges.clear()
for vid, (in_set, out_set) in self._vertices.iteritems():
in_set.clear()
out_set.clear()
# ##########################################################
#
# Extend Graph concept
#
# ##########################################################
def extend(self, graph):
"""Add the specified graph to self, create new vid and eid
args:
- graph (Graph): the graph to add
return:
- (dict of (int, int)): mapping between vertex id in graph and
vertex id in extended self
- (dict of (int, int)): mapping between edge id in graph and
edge id in extended self
"""
# vertex adding
trans_vid = {}
for vid in list(graph.vertices()):
trans_vid[vid] = self.add_vertex()
# edge adding
trans_eid = {}
for eid in list(graph.edges()):
sid = trans_vid[graph.source(eid)]
tid = trans_vid[graph.target(eid)]
trans_eid[eid] = self.add_edge(sid, tid)
return trans_vid, trans_eid
def sub_graph(self, vids):
"""
"""
raise NotImplementedError
# from copy import deepcopy
# vids = set(vids)
#
# result = deepcopy(self)
# result._vertices.clear()
# result._edges.clear()
#
# for key, edges in self._vertices.items():
# if key in vids:
# inedges, outedges = edges
# sortedinedges = set(
# [eid for eid in inedges if self.source(eid) in vids])
# sortedoutedges = set(
# [eid for eid in outedges if self.target(eid) in vids])
# result._vertices.add((sortedinedges, sortedoutedges), key)
# for eid in sortedoutedges:
# result._edges.add(self._edges[eid], eid)
#
# return result
| # -*- coding: utf-8 -*-
#
# Graph : graph package
#
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
#
# File author(s): <NAME> <<EMAIL>>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# VPlants WebSite : https://gforge.inria.fr/projects/vplants/
#
"""This module provide a simple pure python implementation
for a graph interface
does not implement copy concept
"""
from id_dict import IdDict
class GraphError(Exception):
"""
base class of all graph exceptions
"""
class InvalidEdge(GraphError, KeyError):
"""
exception raised when a wrong edge id is provided
"""
class InvalidVertex(GraphError, KeyError):
"""
exception raised when a wrong vertex id is provided
"""
class Graph(object):
"""Directed graph with multiple links
in this implementation :
- vertices are tuple of edge_in,edge_out
- edges are tuple of source,target
"""
def __init__(self, graph=None, idgenerator="set"):
"""constructor
if graph is not none make a copy of the topological structure of graph
(i.e. don't use the same id)
args:
- graph (Graph): the graph to copy, default=None
- idgenerator (str): type of idgenerator to use, default 'set'
"""
self._vertices = IdDict(idgenerator=idgenerator)
self._edges = IdDict(idgenerator=idgenerator)
if graph is not None:
self.extend(graph)
# ##########################################################
#
# Graph concept
#
# ##########################################################
def source(self, eid):
"""Retrieve the source vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][0]
except KeyError:
raise InvalidEdge(eid)
def target(self, eid):
"""Retrieve the target vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][1]
except KeyError:
raise InvalidEdge(eid)
def edge_vertices(self, eid):
"""Retrieve both source and target vertex of an edge
args:
- eid (int): edge id
return:
- (int, int): source id, target id
"""
try:
return self._edges[eid]
except KeyError:
raise InvalidEdge(eid)
def edge(self, source, target):
"""Find the matching edge with same source and same target
return None if no such edge exists
args:
- source (int): source vertex
- target (int): target vertex
return:
- (int): edge id with same source and target
- (None): if search is unsuccessful
"""
if target not in self:
raise InvalidVertex(target)
for eid in self.out_edges(source):
if self.target(eid) == target:
return eid
return None
def __contains__(self, vid):
"""magic alias for `has_vertex`
"""
return self.has_vertex(vid)
def has_vertex(self, vid):
"""test whether a vertex belong to the graph
args:
- vid (int): id of vertex
return:
- (bool)
"""
return vid in self._vertices
def has_edge(self, eid):
"""test whether an edge belong to the graph
args:
- eid (int): id of edge
return:
- (bool)
"""
return eid in self._edges
def is_valid(self):
"""Test the validity of the graph
return:
- (bool)
"""
return True
# ##########################################################
#
# Vertex List Graph Concept
#
# ##########################################################
def vertices(self):
"""Iterator on all vertices
return:
- (iter of int)
"""
return iter(self._vertices)
def __iter__(self):
"""Magic alias for `vertices`
"""
return iter(self._vertices)
def nb_vertices(self):
"""Total number of vertices in the graph
return:
- (int)
"""
return len(self._vertices)
def __len__(self):
"""Magic alias for `nb_vertices`
"""
return self.nb_vertices()
def in_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.source(eid) for eid in self._vertices[vid][0]]
return iter(set(neighbors_list))
def out_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.target(eid) for eid in self._vertices[vid][1]]
return iter(set(neighbors_list))
def neighbors(self, vid):
"""Iterator on all neighbors of vid both in and out
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
neighbors_list = list(self.in_neighbors(vid))
neighbors_list.extend(self.out_neighbors(vid))
return iter(set(neighbors_list))
def nb_in_neighbors(self, vid):
"""Number of in neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.in_neighbors(vid))
return len(neighbors_set)
def nb_out_neighbors(self, vid):
"""Number of out neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.out_neighbors(vid))
return len(neighbors_set)
def nb_neighbors(self, vid):
"""Total number of both in and out neighbors of vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.neighbors(vid))
return len(neighbors_set)
# ##########################################################
#
# Edge List Graph Concept
#
# ##########################################################
def _iter_edges(self, vid):
"""
internal function that perform 'edges' with vid not None
"""
link_in, link_out = self._vertices[vid]
for eid in link_in:
yield eid
for eid in link_out:
yield eid
def edges(self, vid=None):
"""Iterate on all edges connected to a given vertex.
If vid is None (default), iterate on all edges in the graph
args:
- vid (int): vertex holding the edges, default (None)
return:
- (iter of int): iterator on edge ids
"""
if vid is None:
return iter(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return self._iter_edges(vid)
def nb_edges(self, vid=None):
"""Number of edges connected to a given vertex.
If vid is None (default), total number of edges in the graph
args:
- vid (int): vertex holding the edges, default (None)
return:
- (int)
"""
if vid is None:
return len(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0]) + len(self._vertices[vid][1])
def in_edges(self, vid):
"""Iterate on all edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][0]:
yield eid
def out_edges(self, vid):
"""Iterate on all edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][1]:
yield eid
def nb_in_edges(self, vid):
"""Number of edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0])
def nb_out_edges(self, vid):
"""Number of edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][1])
# ##########################################################
#
# Mutable Vertex Graph concept
#
# ##########################################################
def add_vertex(self, vid=None):
"""Add a vertex to the graph.
If vid is not provided create a new vid
args:
- vid (int): id to use. If None (default) will generate a new one
return:
- vid (int): id used for the new vertex
"""
try:
return self._vertices.add((set(), set()), vid)
except KeyError:
raise InvalidVertex(vid)
def remove_vertex(self, vid):
"""Remove a specified vertex of the graph.
Also remove all edge attached to it.
args:
- vid (int): id of vertex to remove
"""
if vid not in self:
raise InvalidVertex(vid)
link_in, link_out = self._vertices[vid]
for edge in list(link_in):
self.remove_edge(edge)
for edge in list(link_out):
self.remove_edge(edge)
del self._vertices[vid]
def clear(self):
"""Remove all vertices and edges
don't change references to objects
"""
self._edges.clear()
self._vertices.clear()
# ##########################################################
#
# Mutable Edge Graph concept
#
# ##########################################################
def add_edge(self, sid, tid, eid=None):
"""Add an edge to the graph.
If eid is not provided generate a new one.
args:
- sid (int): id of source vertex
- tid (int): id of target vertex
- eid (int): id to use. If None (default) will generate a new one
return:
- eid (int): id used for new edge
"""
if sid not in self:
raise InvalidVertex(sid)
if tid not in self:
raise InvalidVertex(tid)
try:
eid = self._edges.add((sid, tid), eid)
except KeyError:
raise InvalidEdge(eid)
self._vertices[sid][1].add(eid)
self._vertices[tid][0].add(eid)
return eid
def remove_edge(self, eid):
"""Remove a specified edge from the graph.
args:
- eid (int): id of edge to remove
"""
if not self.has_edge(eid):
raise InvalidEdge(eid)
sid, tid = self._edges[eid]
self._vertices[sid][1].remove(eid)
self._vertices[tid][0].remove(eid)
del self._edges[eid]
def clear_edges(self):
"""Remove all the edges of the graph
don't change references to objects
"""
self._edges.clear()
for vid, (in_set, out_set) in self._vertices.iteritems():
in_set.clear()
out_set.clear()
# ##########################################################
#
# Extend Graph concept
#
# ##########################################################
def extend(self, graph):
"""Add the specified graph to self, create new vid and eid
args:
- graph (Graph): the graph to add
return:
- (dict of (int, int)): mapping between vertex id in graph and
vertex id in extended self
- (dict of (int, int)): mapping between edge id in graph and
edge id in extended self
"""
# vertex adding
trans_vid = {}
for vid in list(graph.vertices()):
trans_vid[vid] = self.add_vertex()
# edge adding
trans_eid = {}
for eid in list(graph.edges()):
sid = trans_vid[graph.source(eid)]
tid = trans_vid[graph.target(eid)]
trans_eid[eid] = self.add_edge(sid, tid)
return trans_vid, trans_eid
def sub_graph(self, vids):
"""
"""
raise NotImplementedError
# from copy import deepcopy
# vids = set(vids)
#
# result = deepcopy(self)
# result._vertices.clear()
# result._edges.clear()
#
# for key, edges in self._vertices.items():
# if key in vids:
# inedges, outedges = edges
# sortedinedges = set(
# [eid for eid in inedges if self.source(eid) in vids])
# sortedoutedges = set(
# [eid for eid in outedges if self.target(eid) in vids])
# result._vertices.add((sortedinedges, sortedoutedges), key)
# for eid in sortedoutedges:
# result._edges.add(self._edges[eid], eid)
#
# return result
| en | 0.482161 | # -*- coding: utf-8 -*- # # Graph : graph package # # Copyright or Copr. 2006 INRIA - CIRAD - INRA # # File author(s): <NAME> <<EMAIL>> # # Distributed under the Cecill-C License. # See accompanying file LICENSE.txt or copy at # http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html # # VPlants WebSite : https://gforge.inria.fr/projects/vplants/ # This module provide a simple pure python implementation for a graph interface does not implement copy concept base class of all graph exceptions exception raised when a wrong edge id is provided exception raised when a wrong vertex id is provided Directed graph with multiple links in this implementation : - vertices are tuple of edge_in,edge_out - edges are tuple of source,target constructor if graph is not none make a copy of the topological structure of graph (i.e. don't use the same id) args: - graph (Graph): the graph to copy, default=None - idgenerator (str): type of idgenerator to use, default 'set' # ########################################################## # # Graph concept # # ########################################################## Retrieve the source vertex of an edge args: - eid (int): edge id return: - (int): vertex id Retrieve the target vertex of an edge args: - eid (int): edge id return: - (int): vertex id Retrieve both source and target vertex of an edge args: - eid (int): edge id return: - (int, int): source id, target id Find the matching edge with same source and same target return None if it don't succeed args: - source (int): source vertex - target (int): target vertex return: - (int): edge id with same source and target - (None): if search is unsuccessful magic alias for `has_vertex` test whether a vertex belong to the graph args: - vid (int): id of vertex return: - (bool) test whether an edge belong to the graph args: - eid (int): id of edge return: - (bool) Test the validity of the graph return: - (bool) # ########################################################## # # Vertex List Graph Concept # # ########################################################## Iterator on all vertices return: - (iter of int) Magic alias for `vertices` Total number of vertices in the graph return: - (int) Magic alias for `nb_vertices` Iterator on the neighbors of vid where edges are directed from neighbor to vid args: - vid (int): vertex id return: - (iter of int): iter of vertex id Iterator on the neighbors of vid where edges are directed from vid to neighbor args: - vid (int): vertex id return: - (iter of int): iter of vertex id Iterator on all neighbors of vid both in and out args: - vid (int): vertex id return: - (iter of int): iter of vertex id Number of in neighbors of vid where edges are directed from neighbor to vid args: - vid (int): vertex id return: - (int) Number of out neighbors of vid where edges are directed from vid to neighbor args: - vid (int): vertex id return: - (int) Total number of both in and out neighbors of vid args: - vid (int): vertex id return: - (int) # ########################################################## # # Edge List Graph Concept # # ########################################################## internal function that perform 'edges' with vid not None Iterate on all edges connected to a given vertex. If vid is None (default), iterate on all edges in the graph args: - vid (int): vertex holdings edges, default (None) return: - (iter of int): iterator on edge ids Number of edges connected to a given vertex. 
If vid is None (default), total number of edges in the graph args: - vid (int): vertex holdings edges, default (None) return: - (int) Iterate on all edges pointing to a given vertex. args: - vid (int): vertex target of edges return: - (iter of int): iterator on edge ids Iterate on all edges away from a given vertex. args: - vid (int): vertex source of edges return: - (iter of int): iterator on edge ids Number of edges pointing to a given vertex. args: - vid (int): vertex target of edges return: - (int) Number of edges away from a given vertex. args: - vid (int): vertex source of edges return: - (int) # ########################################################## # # Mutable Vertex Graph concept # # ########################################################## Add a vertex to the graph. If vid is not provided create a new vid args: - vid (int): id to use. If None (default) will generate a new one return: - vid (int): id used for the new vertex Remove a specified vertex of the graph. Also remove all edge attached to it. args: - vid (int): id of vertex to remove Remove all vertices and edges don't change references to objects # ########################################################## # # Mutable Edge Graph concept # # ########################################################## Add an edge to the graph. If eid is not provided generate a new one. args: - sid (int): id of source vertex - tid (int): id of target vertex - eid (int): id to use. If None (default) will generate a new one return: - eid (int): id used for new edge Remove a specified edge from the graph. args: - eid (int): id of edge to remove Remove all the edges of the graph don't change references to objects # ########################################################## # # Extend Graph concept # # ########################################################## Add the specified graph to self, create new vid and eid args: - graph (Graph): the graph to add return: - (dict of (int, int)): mapping between vertex id in graph and vertex id in extended self - (dict of (int, int)): mapping between edge id in graph and edge id in extended self # vertex adding # edge adding # from copy import deepcopy # vids = set(vids) # # result = deepcopy(self) # result._vertices.clear() # result._edges.clear() # # for key, edges in self._vertices.items(): # if key in vids: # inedges, outedges = edges # sortedinedges = set( # [eid for eid in inedges if self.source(eid) in vids]) # sortedoutedges = set( # [eid for eid in outedges if self.target(eid) in vids]) # result._vertices.add((sortedinedges, sortedoutedges), key) # for eid in sortedoutedges: # result._edges.add(self._edges[eid], eid) # # return result | 2.900468 | 3 |
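A short usage sketch of the `Graph` container defined in this row. The import path is assumed from the file location (`src/openalea/container/graph.py`); note the module itself is Python-2-era code (`iteritems`, implicit relative import of `id_dict`):

```python
from openalea.container.graph import Graph, InvalidEdge  # assumed import path

g = Graph()
v1 = g.add_vertex()            # ids are handed out by the IdDict generator
v2 = g.add_vertex()
e = g.add_edge(v1, v2)         # directed edge v1 -> v2

assert g.source(e) == v1 and g.target(e) == v2
assert list(g.out_neighbors(v1)) == [v2]
assert g.nb_in_edges(v1) == 0 and g.nb_out_edges(v1) == 1

g.remove_vertex(v2)            # also removes every edge attached to v2
assert g.nb_edges() == 0
try:
    g.source(e)                # the edge id is no longer valid
except InvalidEdge:
    pass
```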
nets/mobilenet_v2_ssd.py | GT-AcerZhang/PaddlePaddle-SSD | 47 | 9402 | import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetV2SSD:
def __init__(self, img, num_classes, img_shape):
self.img = img
self.num_classes = num_classes
self.img_shape = img_shape
def ssd_net(self, scale=1.0):
# 300x300
bottleneck_params_list = [(1, 16, 1, 1),
(6, 24, 2, 2),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)]
# conv1
input = self.conv_bn_layer(input=self.img,
num_filters=int(32 * scale),
filter_size=3,
stride=2,
padding=1,
if_act=True)
# bottleneck sequences
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
in_c = int(c * scale)
# 19x19
module11 = input
tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2)
# 10x10
module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1)
module14 = self.extra_block(module13, 256, 512, 1)
# 5x5
module15 = self.extra_block(module14, 128, 256, 1)
# 3x3
module16 = self.extra_block(module15, 128, 256, 1)
# 2x2
module17 = self.extra_block(module16, 64, 128, 1)
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=[module11, module13, module14, module15, module16, module17],
image=self.img,
num_classes=self.num_classes,
min_ratio=20,
max_ratio=90,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]],
base_size=self.img_shape[2],
offset=0.5,
flip=True)
return mbox_locs, mbox_confs, box, box_var
def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True,
use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn = fluid.layers.batch_norm(input=conv)
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
def inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
padding,
expansion_factor):
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = self.conv_bn_layer(input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
bottleneck_conv = self.conv_bn_layer(input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
if_act=True,
use_cudnn=False)
linear_out = self.conv_bn_layer(input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=False)
if ifshortcut:
out = self.shortcut(input=input, data_residual=linear_out)
return out
else:
return linear_out
def invresi_blocks(self, input, in_c, t, c, n, s):
first_block = self.inverted_residual_unit(input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t)
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block = self.inverted_residual_unit(input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t)
return last_residual_block
def conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def extra_block(self, input, num_filters1, num_filters2, num_groups):
# 1x1 conv
pointwise_conv = self.conv_bn(input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0)
# 3x3 conv
normal_conv = self.conv_bn(input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1)
return normal_conv
def build_ssd(img, num_classes, img_shape):
ssd_model = MobileNetV2SSD(img, num_classes, img_shape)
return ssd_model.ssd_net()
if __name__ == '__main__':
data = fluid.data(name='data', shape=[None, 3, 300, 300])
build_ssd(data, 21, img_shape=[3, 300, 300])
| import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetV2SSD:
def __init__(self, img, num_classes, img_shape):
self.img = img
self.num_classes = num_classes
self.img_shape = img_shape
def ssd_net(self, scale=1.0):
# 300x300
bottleneck_params_list = [(1, 16, 1, 1),
(6, 24, 2, 2),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)]
# conv1
input = self.conv_bn_layer(input=self.img,
num_filters=int(32 * scale),
filter_size=3,
stride=2,
padding=1,
if_act=True)
# bottleneck sequences
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
in_c = int(c * scale)
# 19x19
module11 = input
tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2)
# 10x10
module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1)
module14 = self.extra_block(module13, 256, 512, 1)
# 5x5
module15 = self.extra_block(module14, 128, 256, 1)
# 3x3
module16 = self.extra_block(module15, 128, 256, 1)
# 2x2
module17 = self.extra_block(module16, 64, 128, 1)
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=[module11, module13, module14, module15, module16, module17],
image=self.img,
num_classes=self.num_classes,
min_ratio=20,
max_ratio=90,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]],
base_size=self.img_shape[2],
offset=0.5,
flip=True)
return mbox_locs, mbox_confs, box, box_var
def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True,
use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn = fluid.layers.batch_norm(input=conv)
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
def inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
padding,
expansion_factor):
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = self.conv_bn_layer(input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
bottleneck_conv = self.conv_bn_layer(input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
if_act=True,
use_cudnn=False)
linear_out = self.conv_bn_layer(input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=False)
if ifshortcut:
out = self.shortcut(input=input, data_residual=linear_out)
return out
else:
return linear_out
def invresi_blocks(self, input, in_c, t, c, n, s):
first_block = self.inverted_residual_unit(input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t)
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block = self.inverted_residual_unit(input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t)
return last_residual_block
def conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def extra_block(self, input, num_filters1, num_filters2, num_groups):
# 1x1 conv
pointwise_conv = self.conv_bn(input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0)
# 3x3 conv
normal_conv = self.conv_bn(input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1)
return normal_conv
def build_ssd(img, num_classes, img_shape):
ssd_model = MobileNetV2SSD(img, num_classes, img_shape)
return ssd_model.ssd_net()
if __name__ == '__main__':
data = fluid.data(name='data', shape=[None, 3, 300, 300])
build_ssd(data, 21, img_shape=[3, 300, 300])
| en | 0.385212 | # 300x300 # conv1 # bottleneck sequences # 19x19 # 10x10 # 5x5 # 3x3 # 2x2 # 1x1 conv # 3x3 conv | 2.411921 | 2 |
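`build_ssd` returns the raw `multi_box_head` outputs (locations, confidences, prior boxes, prior variances). A hedged sketch of how those would typically be wired into a training loss with the Paddle 1.x fluid API; the ground-truth feeds and loss call below are illustrative and not part of this repository:

```python
import paddle.fluid as fluid
from nets.mobilenet_v2_ssd import build_ssd   # module path taken from this row

image = fluid.data(name='image', shape=[None, 3, 300, 300])
gt_box = fluid.data(name='gt_box', shape=[None, 4], dtype='float32', lod_level=1)
gt_label = fluid.data(name='gt_label', shape=[None, 1], dtype='int32', lod_level=1)

locs, confs, prior_box, box_var = build_ssd(image, num_classes=21, img_shape=[3, 300, 300])

# Standard SSD multibox loss over the generated priors (fluid 1.x API, assumed here).
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, prior_box, box_var)
total_loss = fluid.layers.reduce_sum(loss)
```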
oneflow/python/test/ops/test_object_bbox_scale.py | caishenghang/oneflow | 2 | 9403 | <gh_stars>1-10
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _random_sample_images(anno_file, image_dir, batch_size):
from pycocotools.coco import COCO
image_files = []
image_ids = []
batch_group_id = -1
coco = COCO(anno_file)
img_ids = coco.getImgIds()
while len(image_files) < batch_size:
rand_img_id = random.choice(img_ids)
img_h = coco.imgs[rand_img_id]["height"]
img_w = coco.imgs[rand_img_id]["width"]
group_id = int(img_h / img_w)
if batch_group_id == -1:
batch_group_id = group_id
if group_id != batch_group_id:
continue
anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
if len(anno_ids) == 0:
continue
image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"]))
image_ids.append(rand_img_id)
assert len(image_files) == len(image_ids)
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
bbox_list = _get_images_bbox_list(coco, image_ids)
return images, bbox_list
def _get_images_bbox_list(coco, image_ids):
bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
anno_ids = list(
filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids)
)
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
return bbox_list
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
image_shape = _get_images_static_shape(images)
bbox_shape = _get_bbox_static_shape(bbox_list)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def target_resize_bbox_scale_job(
image_def: oft.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
bbox_def: oft.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
):
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
)
return scaled_bbox_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
output_bbox_list, output_image_size = target_resize_bbox_scale_job(
[input_image_list], [input_bbox_list]
).get()
return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0]
def _compare_bbox_scale(
test_case,
anno_file,
image_dir,
batch_size,
target_size,
max_size,
print_debug_info=False,
):
images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size)
of_bbox_list, image_size_list = _of_target_resize_bbox_scale(
images, bbox_list, target_size, max_size
)
for image, bbox, of_bbox, image_size in zip(
images, bbox_list, of_bbox_list, image_size_list
):
w, h = image_size
oh, ow = image.shape[0:2]
scale_h = h / oh
scale_w = w / ow
bbox[:, 0] *= scale_w
bbox[:, 1] *= scale_h
bbox[:, 2] *= scale_w
bbox[:, 3] *= scale_h
test_case.assertTrue(np.allclose(bbox, of_bbox))
@flow.unittest.skip_unless_1n1d()
class TestObjectBboxScale(flow.unittest.TestCase):
def test_object_bbox_scale(test_case):
_compare_bbox_scale(
test_case,
"/dataset/mscoco_2017/annotations/instances_val2017.json",
"/dataset/mscoco_2017/val2017",
4,
800,
1333,
)
if __name__ == "__main__":
unittest.main()
| """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _random_sample_images(anno_file, image_dir, batch_size):
from pycocotools.coco import COCO
image_files = []
image_ids = []
batch_group_id = -1
coco = COCO(anno_file)
img_ids = coco.getImgIds()
while len(image_files) < batch_size:
rand_img_id = random.choice(img_ids)
img_h = coco.imgs[rand_img_id]["height"]
img_w = coco.imgs[rand_img_id]["width"]
group_id = int(img_h / img_w)
if batch_group_id == -1:
batch_group_id = group_id
if group_id != batch_group_id:
continue
anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
if len(anno_ids) == 0:
continue
image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"]))
image_ids.append(rand_img_id)
assert len(image_files) == len(image_ids)
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
bbox_list = _get_images_bbox_list(coco, image_ids)
return images, bbox_list
def _get_images_bbox_list(coco, image_ids):
bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
anno_ids = list(
filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids)
)
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
return bbox_list
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
image_shape = _get_images_static_shape(images)
bbox_shape = _get_bbox_static_shape(bbox_list)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def target_resize_bbox_scale_job(
image_def: oft.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
bbox_def: oft.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
):
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
)
return scaled_bbox_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
output_bbox_list, output_image_size = target_resize_bbox_scale_job(
[input_image_list], [input_bbox_list]
).get()
return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0]
def _compare_bbox_scale(
test_case,
anno_file,
image_dir,
batch_size,
target_size,
max_size,
print_debug_info=False,
):
images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size)
of_bbox_list, image_size_list = _of_target_resize_bbox_scale(
images, bbox_list, target_size, max_size
)
for image, bbox, of_bbox, image_size in zip(
images, bbox_list, of_bbox_list, image_size_list
):
w, h = image_size
oh, ow = image.shape[0:2]
scale_h = h / oh
scale_w = w / ow
bbox[:, 0] *= scale_w
bbox[:, 1] *= scale_h
bbox[:, 2] *= scale_w
bbox[:, 3] *= scale_h
test_case.assertTrue(np.allclose(bbox, of_bbox))
@flow.unittest.skip_unless_1n1d()
class TestObjectBboxScale(flow.unittest.TestCase):
def test_object_bbox_scale(test_case):
_compare_bbox_scale(
test_case,
"/dataset/mscoco_2017/annotations/instances_val2017.json",
"/dataset/mscoco_2017/val2017",
4,
800,
1333,
)
if __name__ == "__main__":
unittest.main() | en | 0.864155 | Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.950223 | 2 |
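The assertion at the end of `_compare_bbox_scale` simply multiplies each box coordinate by the width/height ratios implied by the size the resize op reports. A small NumPy sketch of that bookkeeping; the shorter-side-to-`target_size`, longer-side-capped-at-`max_size` rule is the usual semantic of a target resize (an assumption here, since the test itself only trusts the sizes returned by the op):

```python
import numpy as np

oh, ow = 480, 640                      # original image size (illustrative values)
target_size, max_size = 800, 1333
scale = min(target_size / min(oh, ow), max_size / max(oh, ow))
w, h = round(ow * scale), round(oh * scale)

bbox = np.array([[10., 20., 100., 50.]], dtype=np.single)   # COCO-style x, y, w, h
scaled = bbox * np.array([w / ow, h / oh, w / ow, h / oh], dtype=np.single)
# This mirrors the per-column multiplications checked against of_bbox in the test.
```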
vagrant/kafka/bin/init.py | BertRaeymaekers/scrapbook | 0 | 9404 | #! /usr/bin/env python3
import json
import os.path
import jinja2
DEFAULT_PARAMS = {
"ansible_user": "vagrant"
}
if __name__ == "__main__":
# Reading configuration
here = os.path.dirname(os.path.realpath(__file__ + "/../"))
with open(here + "/config.json", "r") as rf:
config = json.load(rf)
print(json.dumps(config, sort_keys=True, indent=4))
# Generating an inventory file
with open(here + "/playbook/inventory/hosts", "w") as inventory:
inventory.write("[kafka]\n")
for host in config["hosts"]:
# Setting default values and updating them when more specific.
params = dict()
params.update(DEFAULT_PARAMS)
params.update(config["params"])
params.update(config["hosts"][host])
# Setting some extra ansible paramters.
params["ansible_ssh_host"] = params["ip"]
inventory.write("%s\t%s\n" % (host, " ".join(("%s=%s" % (k,v) for k,v in params.items()))))
# Generating the Vagrantfile
env = jinja2.Environment(loader=jinja2.FileSystemLoader(here + "/templates/"))
template = env.get_template('Vagrantfile.j2')
template.stream(**config).dump(here + '/vagrant/Vagrantfile')
# Generating group vars for kafka
with open(here + "/playbook/group_vars/kafka.yml", "w") as gv:
gv.write("---\n")
gv.write("hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" %s: '%s.%s'\n" % (params["ip"], params["hostname"], config["params"]["domain" ]))
gv.write("kafka:\n")
gv.write(" hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" - %s.%s\n" % (params["hostname"], config["params"]["domain" ]))
| #! /usr/bin/env python3
import json
import os.path
import jinja2
DEFAULT_PARAMS = {
"ansible_user": "vagrant"
}
if __name__ == "__main__":
# Reading configuration
here = os.path.dirname(os.path.realpath(__file__ + "/../"))
with open(here + "/config.json", "r") as rf:
config = json.load(rf)
print(json.dumps(config, sort_keys=True, indent=4))
# Generating an inventory file
with open(here + "/playbook/inventory/hosts", "w") as inventory:
inventory.write("[kafka]\n")
for host in config["hosts"]:
# Setting default values and updating them when more specific.
params = dict()
params.update(DEFAULT_PARAMS)
params.update(config["params"])
params.update(config["hosts"][host])
# Setting some extra ansible paramters.
params["ansible_ssh_host"] = params["ip"]
inventory.write("%s\t%s\n" % (host, " ".join(("%s=%s" % (k,v) for k,v in params.items()))))
# Generating the Vagrantfile
env = jinja2.Environment(loader=jinja2.FileSystemLoader(here + "/templates/"))
template = env.get_template('Vagrantfile.j2')
template.stream(**config).dump(here + '/vagrant/Vagrantfile')
# Generating group vars for kafka
with open(here + "/playbook/group_vars/kafka.yml", "w") as gv:
gv.write("---\n")
gv.write("hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" %s: '%s.%s'\n" % (params["ip"], params["hostname"], config["params"]["domain" ]))
gv.write("kafka:\n")
gv.write(" hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" - %s.%s\n" % (params["hostname"], config["params"]["domain" ]))
| en | 0.482628 | #! /usr/bin/env python3 # Reading configuration # Generating an inventory file # Setting default values and updating them when more specific. # Setting some extra ansible paramters. # Generating the Vagrantfile # Generating group vars for kafka | 2.40611 | 2 |
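The script expects a `config.json` one level above `bin/` containing a `hosts` map and shared `params`; the keys it actually reads are `ip` and `hostname` per host plus `domain` under `params`. A hypothetical minimal config (host names and addresses invented for illustration), written from Python so the expected structure is explicit:

```python
import json

config = {
    "params": {"domain": "example.lan"},                    # used to build the FQDNs
    "hosts": {
        "kafka1": {"ip": "192.168.56.11", "hostname": "kafka1"},
        "kafka2": {"ip": "192.168.56.12", "hostname": "kafka2"},
    },
}
with open("config.json", "w") as wf:
    json.dump(config, wf, indent=4)
```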
harvest/models/beastsimulator.py | lmaurits/harvest | 1 | 9405 | <reponame>lmaurits/harvest
import os
import harvest.dataframe
from harvest.models.simulator import Simulator
class BeastSimulator(Simulator):
def __init__(self, tree, n_features):
Simulator.__init__(self, tree, n_features)
def generate_beast_xml(self):
# Subclasses should implement this
return None
def generate_data(self):
# Generate BEAST XML file to do simulation
xml = self.generate_beast_xml()
temp_filename = xml.write_file(overwrite=True)
# Run BEAST simulation
os.system("beast %s > /dev/null" % temp_filename)
# Delete BEAST XML file
os.remove(temp_filename)
# Read simulated data
data = harvest.dataframe.read_from_beast_xml(xml.output_filename)
# Delete simualted data
os.remove(xml.output_filename)
self.data = data
self.data.datatype = self.datatype
| import os
import harvest.dataframe
from harvest.models.simulator import Simulator
class BeastSimulator(Simulator):
def __init__(self, tree, n_features):
Simulator.__init__(self, tree, n_features)
def generate_beast_xml(self):
# Subclasses should implement this
return None
def generate_data(self):
# Generate BEAST XML file to do simulation
xml = self.generate_beast_xml()
temp_filename = xml.write_file(overwrite=True)
# Run BEAST simulation
os.system("beast %s > /dev/null" % temp_filename)
# Delete BEAST XML file
os.remove(temp_filename)
# Read simulated data
data = harvest.dataframe.read_from_beast_xml(xml.output_filename)
# Delete simualted data
os.remove(xml.output_filename)
self.data = data
self.data.datatype = self.datatype | en | 0.758773 | # Subclasses should implement this # Generate BEAST XML file to do simulation # Run BEAST simulation # Delete BEAST XML file # Read simulated data # Delete simualted data | 2.762112 | 3 |
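`generate_beast_xml` is deliberately left to subclasses. The only contract implied by `generate_data` is that the returned object exposes `write_file(overwrite=True)` (returning the path of the XML it wrote) and an `output_filename` attribute naming the file BEAST will produce, and that `self.datatype` is set somewhere. A hypothetical subclass outline under exactly those assumptions:

```python
from harvest.models.beastsimulator import BeastSimulator

class _XmlStub:
    """Hypothetical stand-in for whatever XML wrapper a real subclass builds."""
    output_filename = "sim_output.nex"          # where BEAST is expected to write data

    def write_file(self, overwrite=True):
        path = "sim_config.xml"
        with open(path, "w") as f:
            f.write("<beast/>")                 # a real subclass writes a full config
        return path

class MyBinarySimulator(BeastSimulator):
    def __init__(self, tree, n_features):
        BeastSimulator.__init__(self, tree, n_features)
        self.datatype = "binary"                # consumed at the end of generate_data()

    def generate_beast_xml(self):
        # Only the interface implied by generate_data() matters here:
        # write_file(overwrite=True) -> path, plus an .output_filename attribute.
        return _XmlStub()
```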
assimilator.py | DutChen18/slime-clusters-cuda | 0 | 9406 | # pylint: skip-file
import os
from assimilator import *
from Boinc import boinc_project_path
class SlimeClustersAssimilator(Assimilator):
def __init__(self):
Assimilator.__init__(self)
def assimilate_handler(self, wu, results, canonical_result):
if canonical_result == None:
return
src_file = self.get_file_path(canonical_result)
dst_dir = boinc_project_path.project_path('slime-clusters')
dst_file = os.path.join(dst_dir, 'results.txt')
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(src_file, 'r') as src, open(dst_file, 'a') as dst:
dst.writelines(src.readlines())
if __name__ == "__main__":
SlimeClustersAssimilator().run() | # pylint: skip-file
import os
from assimilator import *
from Boinc import boinc_project_path
class SlimeClustersAssimilator(Assimilator):
def __init__(self):
Assimilator.__init__(self)
def assimilate_handler(self, wu, results, canonical_result):
if canonical_result == None:
return
src_file = self.get_file_path(canonical_result)
dst_dir = boinc_project_path.project_path('slime-clusters')
dst_file = os.path.join(dst_dir, 'results.txt')
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(src_file, 'r') as src, open(dst_file, 'a') as dst:
dst.writelines(src.readlines())
if __name__ == "__main__":
SlimeClustersAssimilator().run() | en | 0.409619 | # pylint: skip-file | 2.320321 | 2 |
modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py | Rubtsowa/modin | 0 | 9407 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
class cuDFOnRayDataframe(PandasOnRayDataframe):
"""
The class implements the interface in ``PandasOnRayDataframe`` using cuDF.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = cuDFOnRayDataframePartitionManager
def synchronize_labels(self, axis=None):
"""
Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly.
Parameters
----------
axis : {0, 1, None}, default: None
The axis to apply to. If None, it applies to both axes.
"""
ErrorMessage.catch_bugs_and_request_email(
axis is not None and axis not in [0, 1]
)
cum_row_lengths = np.cumsum([0] + self._row_lengths)
cum_col_widths = np.cumsum([0] + self._column_widths)
def apply_idx_objs(df, idx, cols, axis):
# cudf does not support set_axis. It only supports rename with 1-to-1 mapping.
            # Therefore, we need to create dictionaries that map the current labels
            # to the new ones.
            idx = {df.index[i]: idx[i] for i in range(len(idx))}
            cols = {df.columns[i]: cols[i] for i in range(len(cols))}
if axis == 0:
return df.rename(index=idx)
elif axis == 1:
return df.rename(columns=cols)
else:
return df.rename(index=idx, columns=cols)
keys = np.array(
[
[
self._partitions[i][j].apply(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
axis=axis,
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._partitions = np.array(
[
[
cuDFOnRayDataframePartition(
self._partitions[i][j].get_gpu_manager(),
keys[i][j],
self._partitions[i][j]._length_cache,
self._partitions[i][j]._width_cache,
)
for j in range(len(keys[i]))
]
for i in range(len(keys))
]
)
def mask(
self,
row_indices=None,
row_numeric_idx=None,
col_indices=None,
col_numeric_idx=None,
):
"""
Lazily select columns or rows from given indices.
Parameters
----------
row_indices : list of hashable, optional
The row labels to extract.
row_numeric_idx : list of int, optional
The row indices to extract.
col_indices : list of hashable, optional
The column labels to extract.
col_numeric_idx : list of int, optional
The column indices to extract.
Returns
-------
cuDFOnRayDataframe
A new ``cuDFOnRayDataframe`` from the mask provided.
Notes
-----
If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used.
        The same rule applies to `col_indices` and `col_numeric_idx`.
"""
if isinstance(row_numeric_idx, slice) and (
row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None)
):
row_numeric_idx = None
if isinstance(col_numeric_idx, slice) and (
col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None)
):
col_numeric_idx = None
if (
row_indices is None
and row_numeric_idx is None
and col_indices is None
and col_numeric_idx is None
):
return self.copy()
if row_indices is not None:
row_numeric_idx = self.index.get_indexer_for(row_indices)
if row_numeric_idx is not None:
row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx)
if isinstance(row_numeric_idx, slice):
# Row lengths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_row_lengths = [
len(range(*idx.indices(self._row_lengths[p])))
for p, idx in row_partitions_list.items()
]
# Use the slice to calculate the new row index
new_index = self.index[row_numeric_idx]
else:
new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()]
new_index = self.index[sorted(row_numeric_idx)]
else:
row_partitions_list = {
i: slice(None) for i in range(len(self._row_lengths))
}
new_row_lengths = self._row_lengths
new_index = self.index
if col_indices is not None:
col_numeric_idx = self.columns.get_indexer_for(col_indices)
if col_numeric_idx is not None:
col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx)
if isinstance(col_numeric_idx, slice):
# Column widths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_col_widths = [
len(range(*idx.indices(self._column_widths[p])))
for p, idx in col_partitions_list.items()
]
# Use the slice to calculate the new columns
new_columns = self.columns[col_numeric_idx]
assert sum(new_col_widths) == len(
new_columns
), "{} != {}.\n{}\n{}\n{}".format(
sum(new_col_widths),
len(new_columns),
col_numeric_idx,
self._column_widths,
col_partitions_list,
)
if self._dtypes is not None:
new_dtypes = self.dtypes[col_numeric_idx]
else:
new_dtypes = None
else:
new_col_widths = [len(idx) for _, idx in col_partitions_list.items()]
new_columns = self.columns[sorted(col_numeric_idx)]
if self._dtypes is not None:
new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)]
else:
new_dtypes = None
else:
col_partitions_list = {
i: slice(None) for i in range(len(self._column_widths))
}
new_col_widths = self._column_widths
new_columns = self.columns
if self._dtypes is not None:
new_dtypes = self.dtypes
else:
new_dtypes = None
key_and_gpus = np.array(
[
[
[
self._partitions[row_idx][col_idx].mask(
row_internal_indices, col_internal_indices
),
self._partitions[row_idx][col_idx].get_gpu_manager(),
]
for col_idx, col_internal_indices in col_partitions_list.items()
if isinstance(col_internal_indices, slice)
or len(col_internal_indices) > 0
]
for row_idx, row_internal_indices in row_partitions_list.items()
if isinstance(row_internal_indices, slice)
or len(row_internal_indices) > 0
]
)
shape = key_and_gpus.shape[:2]
keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist())
gpu_managers = key_and_gpus[:, :, 1].flatten().tolist()
new_partitions = self._partition_mgr_cls._create_partitions(
keys, gpu_managers
).reshape(shape)
intermediate = self.__constructor__(
new_partitions,
new_index,
new_columns,
new_row_lengths,
new_col_widths,
new_dtypes,
)
# Check if monotonically increasing, return if it is. Fast track code path for
# common case to keep it fast.
if (
row_numeric_idx is None
or isinstance(row_numeric_idx, slice)
or len(row_numeric_idx) == 1
or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1])
) and (
col_numeric_idx is None
or isinstance(col_numeric_idx, slice)
or len(col_numeric_idx) == 1
or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1])
):
return intermediate
# The new labels are often smaller than the old labels, so we can't reuse the
# original order values because those were mapped to the original data. We have
# to reorder here based on the expected order from within the data.
# We create a dictionary mapping the position of the numeric index with respect
# to all others, then recreate that order by mapping the new order values from
# the old. This information is sent to `_reorder_labels`.
if row_numeric_idx is not None:
row_order_mapping = dict(
zip(sorted(row_numeric_idx), range(len(row_numeric_idx)))
)
new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx]
else:
new_row_order = None
if col_numeric_idx is not None:
col_order_mapping = dict(
zip(sorted(col_numeric_idx), range(len(col_numeric_idx)))
)
new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx]
else:
new_col_order = None
return intermediate._reorder_labels(
row_numeric_idx=new_row_order, col_numeric_idx=new_col_order
)
| # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
class cuDFOnRayDataframe(PandasOnRayDataframe):
"""
The class implements the interface in ``PandasOnRayDataframe`` using cuDF.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = cuDFOnRayDataframePartitionManager
def synchronize_labels(self, axis=None):
"""
Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly.
Parameters
----------
axis : {0, 1, None}, default: None
The axis to apply to. If None, it applies to both axes.
"""
ErrorMessage.catch_bugs_and_request_email(
axis is not None and axis not in [0, 1]
)
cum_row_lengths = np.cumsum([0] + self._row_lengths)
cum_col_widths = np.cumsum([0] + self._column_widths)
def apply_idx_objs(df, idx, cols, axis):
# cudf does not support set_axis. It only supports rename with 1-to-1 mapping.
            # Therefore, we need to create dictionaries that map the current labels
            # to the new ones.
            idx = {df.index[i]: idx[i] for i in range(len(idx))}
            cols = {df.columns[i]: cols[i] for i in range(len(cols))}
if axis == 0:
return df.rename(index=idx)
elif axis == 1:
return df.rename(columns=cols)
else:
return df.rename(index=idx, columns=cols)
keys = np.array(
[
[
self._partitions[i][j].apply(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
axis=axis,
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._partitions = np.array(
[
[
cuDFOnRayDataframePartition(
self._partitions[i][j].get_gpu_manager(),
keys[i][j],
self._partitions[i][j]._length_cache,
self._partitions[i][j]._width_cache,
)
for j in range(len(keys[i]))
]
for i in range(len(keys))
]
)
def mask(
self,
row_indices=None,
row_numeric_idx=None,
col_indices=None,
col_numeric_idx=None,
):
"""
Lazily select columns or rows from given indices.
Parameters
----------
row_indices : list of hashable, optional
The row labels to extract.
row_numeric_idx : list of int, optional
The row indices to extract.
col_indices : list of hashable, optional
The column labels to extract.
col_numeric_idx : list of int, optional
The column indices to extract.
Returns
-------
cuDFOnRayDataframe
A new ``cuDFOnRayDataframe`` from the mask provided.
Notes
-----
If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used.
        The same rule applies to `col_indices` and `col_numeric_idx`.
"""
if isinstance(row_numeric_idx, slice) and (
row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None)
):
row_numeric_idx = None
if isinstance(col_numeric_idx, slice) and (
col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None)
):
col_numeric_idx = None
if (
row_indices is None
and row_numeric_idx is None
and col_indices is None
and col_numeric_idx is None
):
return self.copy()
if row_indices is not None:
row_numeric_idx = self.index.get_indexer_for(row_indices)
if row_numeric_idx is not None:
row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx)
if isinstance(row_numeric_idx, slice):
# Row lengths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_row_lengths = [
len(range(*idx.indices(self._row_lengths[p])))
for p, idx in row_partitions_list.items()
]
# Use the slice to calculate the new row index
new_index = self.index[row_numeric_idx]
else:
new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()]
new_index = self.index[sorted(row_numeric_idx)]
else:
row_partitions_list = {
i: slice(None) for i in range(len(self._row_lengths))
}
new_row_lengths = self._row_lengths
new_index = self.index
if col_indices is not None:
col_numeric_idx = self.columns.get_indexer_for(col_indices)
if col_numeric_idx is not None:
col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx)
if isinstance(col_numeric_idx, slice):
# Column widths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_col_widths = [
len(range(*idx.indices(self._column_widths[p])))
for p, idx in col_partitions_list.items()
]
# Use the slice to calculate the new columns
new_columns = self.columns[col_numeric_idx]
assert sum(new_col_widths) == len(
new_columns
), "{} != {}.\n{}\n{}\n{}".format(
sum(new_col_widths),
len(new_columns),
col_numeric_idx,
self._column_widths,
col_partitions_list,
)
if self._dtypes is not None:
new_dtypes = self.dtypes[col_numeric_idx]
else:
new_dtypes = None
else:
new_col_widths = [len(idx) for _, idx in col_partitions_list.items()]
new_columns = self.columns[sorted(col_numeric_idx)]
if self._dtypes is not None:
new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)]
else:
new_dtypes = None
else:
col_partitions_list = {
i: slice(None) for i in range(len(self._column_widths))
}
new_col_widths = self._column_widths
new_columns = self.columns
if self._dtypes is not None:
new_dtypes = self.dtypes
else:
new_dtypes = None
key_and_gpus = np.array(
[
[
[
self._partitions[row_idx][col_idx].mask(
row_internal_indices, col_internal_indices
),
self._partitions[row_idx][col_idx].get_gpu_manager(),
]
for col_idx, col_internal_indices in col_partitions_list.items()
if isinstance(col_internal_indices, slice)
or len(col_internal_indices) > 0
]
for row_idx, row_internal_indices in row_partitions_list.items()
if isinstance(row_internal_indices, slice)
or len(row_internal_indices) > 0
]
)
shape = key_and_gpus.shape[:2]
keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist())
gpu_managers = key_and_gpus[:, :, 1].flatten().tolist()
new_partitions = self._partition_mgr_cls._create_partitions(
keys, gpu_managers
).reshape(shape)
intermediate = self.__constructor__(
new_partitions,
new_index,
new_columns,
new_row_lengths,
new_col_widths,
new_dtypes,
)
# Check if monotonically increasing, return if it is. Fast track code path for
# common case to keep it fast.
if (
row_numeric_idx is None
or isinstance(row_numeric_idx, slice)
or len(row_numeric_idx) == 1
or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1])
) and (
col_numeric_idx is None
or isinstance(col_numeric_idx, slice)
or len(col_numeric_idx) == 1
or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1])
):
return intermediate
# The new labels are often smaller than the old labels, so we can't reuse the
# original order values because those were mapped to the original data. We have
# to reorder here based on the expected order from within the data.
# We create a dictionary mapping the position of the numeric index with respect
# to all others, then recreate that order by mapping the new order values from
# the old. This information is sent to `_reorder_labels`.
if row_numeric_idx is not None:
row_order_mapping = dict(
zip(sorted(row_numeric_idx), range(len(row_numeric_idx)))
)
new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx]
else:
new_row_order = None
if col_numeric_idx is not None:
col_order_mapping = dict(
zip(sorted(col_numeric_idx), range(len(col_numeric_idx)))
)
new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx]
else:
new_col_order = None
return intermediate._reorder_labels(
row_numeric_idx=new_row_order, col_numeric_idx=new_col_order
)
| en | 0.803408 | # Licensed to Modin Development Team under one or more contributor license agreements. # See the NOTICE file distributed with this work for additional information regarding # copyright ownership. The Modin Development Team licenses this file to you under the # Apache License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. Module houses class that implements ``PandasOnRayDataframe`` class using cuDF. The class implements the interface in ``PandasOnRayDataframe`` using cuDF. Parameters ---------- partitions : np.ndarray A 2D NumPy array of partitions. index : sequence The index for the dataframe. Converted to a ``pandas.Index``. columns : sequence The columns object for the dataframe. Converted to a ``pandas.Index``. row_lengths : list, optional The length of each partition in the rows. The "height" of each of the block partitions. Is computed if not provided. column_widths : list, optional The width of each partition in the columns. The "width" of each of the block partitions. Is computed if not provided. dtypes : pandas.Series, optional The data types for the dataframe columns. Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly. Parameters ---------- axis : {0, 1, None}, default: None The axis to apply to. If None, it applies to both axes. # cudf does not support set_axis. It only supports rename with 1-to-1 mapping. # Therefore, we need to create the dictionary that have the relationship between # current index and new ones. Lazily select columns or rows from given indices. Parameters ---------- row_indices : list of hashable, optional The row labels to extract. row_numeric_idx : list of int, optional The row indices to extract. col_indices : list of hashable, optional The column labels to extract. col_numeric_idx : list of int, optional The column indices to extract. Returns ------- cuDFOnRayDataframe A new ``cuDFOnRayDataframe`` from the mask provided. Notes ----- If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used. The same rule applied to `col_indices` and `col_numeric_idx`. # Row lengths for slice are calculated as the length of the slice # on the partition. Often this will be the same length as the current # length, but sometimes it is different, thus the extra calculation. # Use the slice to calculate the new row index # Column widths for slice are calculated as the length of the slice # on the partition. Often this will be the same length as the current # length, but sometimes it is different, thus the extra calculation. # Use the slice to calculate the new columns # Check if monotonically increasing, return if it is. Fast track code path for # common case to keep it fast. # The new labels are often smaller than the old labels, so we can't reuse the # original order values because those were mapped to the original data. We have # to reorder here based on the expected order from within the data. 
# We create a dictionary mapping the position of the numeric index with respect # to all others, then recreate that order by mapping the new order values from # the old. This information is sent to `_reorder_labels`. | 1.90013 | 2 |
Exoplanet_Population.py | mw5868/University | 0 | 9408 | from astropy.table import Table, Column
import matplotlib.pyplot as plt
#url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=pl_hostname,ra,dec&order=dec&format=csv"
url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets"
# This API returns Hostname, RA and Dec
t = Table.read(url, format="csv")
t_b = t[t["pl_letter"] == "b"]
t_c = t[t["pl_letter"] == "c"]
t_d = t[t["pl_letter"] == "d"]
t_e = t[t["pl_letter"] == "e"]
t_f = t[t["pl_letter"] == "f"]
t_g = t[t["pl_letter"] == "g"]
t_h = t[t["pl_letter"] == "h"]
t_i = t[t["pl_letter"] == "i"]
fig = plt.figure()
ax = fig.add_subplot(1,1,1,aspect="equal")
ax.scatter(t_b["ra"],t_b["dec"],color="Black",label = "2 Planets")
ax.scatter(t_c["ra"],t_c["dec"],color="red", label = "3 Planets")
ax.scatter(t_d["ra"],t_d["dec"],color="blue", label = "4 Planets")
ax.scatter(t_e["ra"],t_e["dec"],color="green", label = "5 Planets")
ax.scatter(t_f["ra"],t_f["dec"],color="yellow", label = "6 Planets")
ax.scatter(t_g["ra"],t_g["dec"],color="purple", label = "7 Planets")
ax.scatter(t_h["ra"],t_h["dec"],color="orange", label = "8 Planets")
ax.scatter(t_i["ra"],t_i["dec"],color="cyan", label = "9 Planets")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_xlim(360,0)
ax.set_ylim(-90,90)
ax.set_ylabel("DEC")
ax.set_xlabel("RA")
ax.set_title("Positions of Explanets by number of planets in system")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show() | from astropy.table import Table, Column
import matplotlib.pyplot as plt
#url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=pl_hostname,ra,dec&order=dec&format=csv"
url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets"
# This API returns Hostname, RA and Dec
t = Table.read(url, format="csv")
t_b = t[t["pl_letter"] == "b"]
t_c = t[t["pl_letter"] == "c"]
t_d = t[t["pl_letter"] == "d"]
t_e = t[t["pl_letter"] == "e"]
t_f = t[t["pl_letter"] == "f"]
t_g = t[t["pl_letter"] == "g"]
t_h = t[t["pl_letter"] == "h"]
t_i = t[t["pl_letter"] == "i"]
fig = plt.figure()
ax = fig.add_subplot(1,1,1,aspect="equal")
ax.scatter(t_b["ra"],t_b["dec"],color="Black",label = "2 Planets")
ax.scatter(t_c["ra"],t_c["dec"],color="red", label = "3 Planets")
ax.scatter(t_d["ra"],t_d["dec"],color="blue", label = "4 Planets")
ax.scatter(t_e["ra"],t_e["dec"],color="green", label = "5 Planets")
ax.scatter(t_f["ra"],t_f["dec"],color="yellow", label = "6 Planets")
ax.scatter(t_g["ra"],t_g["dec"],color="purple", label = "7 Planets")
ax.scatter(t_h["ra"],t_h["dec"],color="orange", label = "8 Planets")
ax.scatter(t_i["ra"],t_i["dec"],color="cyan", label = "9 Planets")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_xlim(360,0)
ax.set_ylim(-90,90)
ax.set_ylabel("DEC")
ax.set_xlabel("RA")
ax.set_title("Positions of Explanets by number of planets in system")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show() | en | 0.395562 | #url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=pl_hostname,ra,dec&order=dec&format=csv" # This API returns Hostname, RA and Dec | 2.720666 | 3 |
pykuna/errors.py | marthoc/pykuna | 4 | 9409 | <filename>pykuna/errors.py
class KunaError(Exception):
pass
class AuthenticationError(KunaError):
"""Raised when authentication fails."""
pass
class UnauthorizedError(KunaError):
"""Raised when an API call fails as unauthorized (401)."""
pass
| <filename>pykuna/errors.py
class KunaError(Exception):
pass
class AuthenticationError(KunaError):
"""Raised when authentication fails."""
pass
class UnauthorizedError(KunaError):
"""Raised when an API call fails as unauthorized (401)."""
pass
| en | 0.868191 | Raised when authentication fails. Raised when an API call fails as unauthorized (401). | 2.303569 | 2 |
src/pe_problem74.py | henrimitte/Project-Euler | 0 | 9410 | <filename>src/pe_problem74.py
from tools import factorial
def solve():
fa = tuple(factorial(x) for x in range(10))
def _sum_factorial_of_digits(n: int) -> int:
s = 0
while n > 0:
s += fa[n % 10]
n //= 10
return s
limit = 1000000
loops = [0 for x in range(limit)]
for i in range(limit):
if not loops[i]:
loop_not_found = True
chain = [i]
n = i
while loop_not_found:
n = _sum_factorial_of_digits(n)
if n in chain:
loop_not_found = False
else:
chain.append(n)
loops[i] = len(chain)
sixty = sum(filter(lambda v: v == 60, loops)) // 60
print(sixty)
if __name__ == '__main__':
solve()
| <filename>src/pe_problem74.py
from tools import factorial
def solve():
fa = tuple(factorial(x) for x in range(10))
def _sum_factorial_of_digits(n: int) -> int:
s = 0
while n > 0:
s += fa[n % 10]
n //= 10
return s
limit = 1000000
loops = [0 for x in range(limit)]
for i in range(limit):
if not loops[i]:
loop_not_found = True
chain = [i]
n = i
while loop_not_found:
n = _sum_factorial_of_digits(n)
if n in chain:
loop_not_found = False
else:
chain.append(n)
loops[i] = len(chain)
sixty = sum(filter(lambda v: v == 60, loops)) // 60
print(sixty)
if __name__ == '__main__':
solve()
| none | 1 | 3.402893 | 3 |
|
thingsboard_gateway/connectors/modbus/modbus_connector.py | ferguscan/thingsboard-gateway | 0 | 9411 | <filename>thingsboard_gateway/connectors/modbus/modbus_connector.py
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from time import sleep, time
from queue import Queue
from random import choice
from string import ascii_lowercase
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
# Try import Pymodbus library or install it and import
try:
from pymodbus.constants import Defaults
except ImportError:
print("Modbus library not found - installing...")
TBUtility.install_package("pymodbus", ">=2.3.0")
TBUtility.install_package('pyserial')
from pymodbus.constants import Defaults
try:
from twisted.internet import reactor
except ImportError:
TBUtility.install_package('twisted')
from twisted.internet import reactor
from twisted.internet import reactor
from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse
from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse
from pymodbus.register_read_message import ReadRegistersResponseBase
from pymodbus.bit_read_message import ReadBitsResponseBase
from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient
from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.exceptions import ConnectionException
from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.version import version
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.datastore import ModbusSparseDataBlock
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.modbus.constants import *
from thingsboard_gateway.connectors.modbus.slave import Slave
from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter
from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter
CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER]
FRAMER_TYPE = {
'rtu': ModbusRtuFramer,
'socket': ModbusSocketFramer,
'ascii': ModbusAsciiFramer
}
SLAVE_TYPE = {
'tcp': StartTcpServer,
'udp': StartUdpServer,
'serial': StartSerialServer
}
FUNCTION_TYPE = {
'coils_initializer': 'co',
'holding_registers': 'hr',
'input_registers': 'ir',
'discrete_inputs': 'di'
}
FUNCTION_CODE_WRITE = {
'holding_registers': (6, 16),
'coils_initializer': (5, 15)
}
FUNCTION_CODE_READ = {
'holding_registers': 3,
'coils_initializer': 1,
'input_registers': 4,
'discrete_inputs': 2
}
class ModbusConnector(Connector, Thread):
process_requests = Queue(-1)
def __init__(self, gateway, config, connector_type):
self.statistics = {STATISTIC_MESSAGE_RECEIVED_PARAMETER: 0,
STATISTIC_MESSAGE_SENT_PARAMETER: 0}
super().__init__()
self.__gateway = gateway
self._connector_type = connector_type
self.__backward_compatibility_adapter = BackwardCompatibilityAdapter(config, gateway.get_config_path())
self.__config = self.__backward_compatibility_adapter.convert()
self.setName(self.__config.get("name", 'Modbus Default ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
self.__connected = False
self.__stopped = False
self.daemon = True
if self.__config.get('slave'):
self.__slave_thread = Thread(target=self.__configure_and_run_slave, args=(self.__config['slave'],),
daemon=True, name='Gateway as a slave')
self.__slave_thread.start()
if config['slave'].get('sendDataToThingsBoard', False):
self.__modify_main_config()
self.__slaves = []
self.__load_slaves()
def is_connected(self):
return self.__connected
def open(self):
self.__stopped = False
self.start()
def run(self):
self.__connected = True
while True:
if not self.__stopped and not ModbusConnector.process_requests.empty():
thread = Thread(target=self.__process_slaves, daemon=True)
thread.start()
if self.__stopped:
break
sleep(.2)
@staticmethod
def __configure_and_run_slave(config):
identity = None
if config.get('identity'):
identity = ModbusDeviceIdentification()
identity.VendorName = config['identity'].get('vendorName', '')
identity.ProductCode = config['identity'].get('productCode', '')
identity.VendorUrl = config['identity'].get('vendorUrl', '')
identity.ProductName = config['identity'].get('productName', '')
identity.ModelName = config['identity'].get('ModelName', '')
identity.MajorMinorRevision = version.short()
blocks = {}
for (key, value) in config.get('values').items():
values = {}
converter = BytesModbusDownlinkConverter({})
for item in value:
for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
for val in item.get(section, []):
function_code = FUNCTION_CODE_WRITE[key][0] if val['objectsCount'] <= 1 else \
FUNCTION_CODE_WRITE[key][1]
converted_value = converter.convert(
{**val,
'device': config.get('deviceName', 'Gateway'), 'functionCode': function_code,
'byteOrder': config['byteOrder'], 'wordOrder': config['wordOrder']},
{'data': {'params': val['value']}})
values[val['address'] + 1] = converted_value
blocks[FUNCTION_TYPE[key]] = ModbusSparseDataBlock(values)
context = ModbusServerContext(slaves=ModbusSlaveContext(**blocks), single=True)
SLAVE_TYPE[config['type']](context, identity=identity,
address=(config.get('host'), config.get('port')) if (
                                           config['type'] in ('tcp', 'udp')) else None,
port=config.get('port') if config['type'] == 'serial' else None,
framer=FRAMER_TYPE[config['method']])
def __modify_main_config(self):
config = self.__config['slave']
values = config.pop('values')
device = config
for (register, reg_values) in values.items():
for value in reg_values:
for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
if not device.get(section):
device[section] = []
for item in value.get(section, []):
device[section].append({**item, 'functionCode': FUNCTION_CODE_READ[
register] if section not in ('attributeUpdates', 'rpc') else item['functionCode']})
self.__config['master']['slaves'].append(device)
def __load_slaves(self):
self.__slaves = [
Slave(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback': ModbusConnector.callback}) for
device in self.__config.get('master', {'slaves': []}).get('slaves', [])]
@classmethod
def callback(cls, slave):
cls.process_requests.put(slave)
@property
def connector_type(self):
return self._connector_type
def __convert_and_save_data(self, config_tuple):
device, current_device_config, config, device_responses = config_tuple
converted_data = {}
try:
converted_data = device.config[UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config=config,
data=device_responses)
except Exception as e:
log.error(e)
to_send = {DEVICE_NAME_PARAMETER: converted_data[DEVICE_NAME_PARAMETER],
DEVICE_TYPE_PARAMETER: converted_data[DEVICE_TYPE_PARAMETER],
TELEMETRY_PARAMETER: [],
ATTRIBUTES_PARAMETER: []
}
if current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
for current_section_dict in converted_data[converted_data_section]:
for key, value in current_section_dict.items():
if device.config[LAST_PREFIX + converted_data_section].get(key) is None or \
device.config[LAST_PREFIX + converted_data_section][key] != value:
device.config[LAST_PREFIX + converted_data_section][key] = value
to_send[converted_data_section].append({key: value})
elif converted_data and current_device_config.get('sendDataOnlyOnChange') is None or \
not current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
device.config[LAST_PREFIX + converted_data_section] = converted_data[
converted_data_section]
to_send[converted_data_section] = converted_data[converted_data_section]
if to_send.get(ATTRIBUTES_PARAMETER) or to_send.get(TELEMETRY_PARAMETER):
self.__gateway.send_to_storage(self.get_name(), to_send)
self.statistics[STATISTIC_MESSAGE_SENT_PARAMETER] += 1
def close(self):
self.__stopped = True
self.__stop_connections_to_masters()
if reactor.running:
StopServer()
log.info('%s has been stopped.', self.get_name())
def get_name(self):
return self.name
def __process_slaves(self):
# TODO: write documentation
device = ModbusConnector.process_requests.get()
device_responses = {'timeseries': {}, 'attributes': {}}
current_device_config = {}
try:
for config_section in device_responses:
if device.config.get(config_section) is not None:
current_device_config = device.config
self.__connect_to_current_master(device)
if not device.config['master'].is_socket_open() or not len(
current_device_config[config_section]):
continue
# Reading data from device
for interested_data in range(len(current_device_config[config_section])):
current_data = current_device_config[config_section][interested_data]
current_data[DEVICE_NAME_PARAMETER] = device
input_data = self.__function_to_device(device, current_data)
device_responses[config_section][current_data[TAG_PARAMETER]] = {
"data_sent": current_data,
"input_data": input_data}
log.debug("Checking %s for device %s", config_section, device)
                    log.debug('Device response: %s', device_responses)
if device_responses.get('timeseries') or device_responses.get('attributes'):
self.__convert_and_save_data((device, current_device_config, {
**current_device_config,
BYTE_ORDER_PARAMETER: current_device_config.get(BYTE_ORDER_PARAMETER,
device.byte_order),
WORD_ORDER_PARAMETER: current_device_config.get(WORD_ORDER_PARAMETER,
device.word_order)
}, device_responses))
except ConnectionException:
sleep(5)
log.error("Connection lost! Reconnecting...")
except Exception as e:
log.exception(e)
def __connect_to_current_master(self, device=None):
# TODO: write documentation
connect_attempt_count = 5
connect_attempt_time_ms = 100
wait_after_failed_attempts_ms = 300000
if device.config.get('master') is None:
device.config['master'], device.config['available_functions'] = self.__configure_master(device.config)
if connect_attempt_count < 1:
connect_attempt_count = 1
connect_attempt_time_ms = device.config.get('connectAttemptTimeMs', connect_attempt_time_ms)
if connect_attempt_time_ms < 500:
connect_attempt_time_ms = 500
wait_after_failed_attempts_ms = device.config.get('waitAfterFailedAttemptsMs', wait_after_failed_attempts_ms)
if wait_after_failed_attempts_ms < 1000:
wait_after_failed_attempts_ms = 1000
current_time = time() * 1000
if not device.config['master'].is_socket_open():
if device.config['connection_attempt'] >= connect_attempt_count and current_time - device.config[
'last_connection_attempt_time'] >= wait_after_failed_attempts_ms:
device.config['connection_attempt'] = 0
while not device.config['master'].is_socket_open() \
and device.config['connection_attempt'] < connect_attempt_count \
and current_time - device.config.get('last_connection_attempt_time',
0) >= connect_attempt_time_ms:
device.config['connection_attempt'] = device.config[
'connection_attempt'] + 1
device.config['last_connection_attempt_time'] = current_time
log.debug("Modbus trying connect to %s", device)
device.config['master'].connect()
if device.config['connection_attempt'] == connect_attempt_count:
log.warn("Maximum attempt count (%i) for device \"%s\" - encountered.", connect_attempt_count,
device)
if device.config['connection_attempt'] >= 0 and device.config['master'].is_socket_open():
device.config['connection_attempt'] = 0
device.config['last_connection_attempt_time'] = current_time
@staticmethod
def __configure_master(config):
current_config = config
current_config["rtu"] = FRAMER_TYPE[current_config['method']]
if current_config.get('type') == 'tcp':
master = ModbusTcpClient(current_config["host"],
current_config["port"],
current_config["rtu"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"])
elif current_config.get(TYPE_PARAMETER) == 'udp':
master = ModbusUdpClient(current_config["host"],
current_config["port"],
current_config["rtu"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"])
elif current_config.get(TYPE_PARAMETER) == 'serial':
master = ModbusSerialClient(method=current_config["method"],
port=current_config["port"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"],
baudrate=current_config["baudrate"],
stopbits=current_config["stopbits"],
bytesize=current_config["bytesize"],
parity=current_config["parity"],
strict=current_config["strict"])
else:
raise Exception("Invalid Modbus transport type.")
available_functions = {
1: master.read_coils,
2: master.read_discrete_inputs,
3: master.read_holding_registers,
4: master.read_input_registers,
5: master.write_coil,
6: master.write_register,
15: master.write_coils,
16: master.write_registers,
}
return master, available_functions
def __stop_connections_to_masters(self):
for slave in self.__slaves:
if slave.config.get('master') is not None and slave.config.get('master').is_socket_open():
slave.config['master'].close()
@staticmethod
def __function_to_device(device, config):
function_code = config.get('functionCode')
result = None
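        # Dispatch on the standard Modbus function codes: 1 = read coils,
        # 2 = read discrete inputs, 3 = read holding registers, 4 = read input registers,
        # 5/15 = write single/multiple coils, 6/16 = write single/multiple registers.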
if function_code == 1:
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))) * 8,
unit=device.config['unitId'])
elif function_code in (2, 3, 4):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))),
unit=device.config['unitId'])
elif function_code in (5, 15):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
value=config[PAYLOAD_PARAMETER],
unit=device.config['unitId'] * 8)
elif function_code in (6, 16):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
values=config[PAYLOAD_PARAMETER],
unit=device.config['unitId'])
else:
log.error("Unknown Modbus function with code: %s", function_code)
log.debug("With result %s", str(result))
if "Exception" in str(result):
log.exception(result)
return result
def on_attributes_update(self, content):
try:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
for attribute_updates_command_config in device.config['attributeUpdates']:
for attribute_updated in content[DATA_PARAMETER]:
if attribute_updates_command_config[TAG_PARAMETER] == attribute_updated:
to_process = {
DEVICE_SECTION_PARAMETER: content[DEVICE_SECTION_PARAMETER],
DATA_PARAMETER: {
RPC_METHOD_PARAMETER: attribute_updated,
RPC_PARAMS_PARAMETER: content[DATA_PARAMETER][attribute_updated]
}
}
attribute_updates_command_config['byteOrder'] = device.byte_order or 'LITTLE'
attribute_updates_command_config['wordOrder'] = device.word_order or 'LITTLE'
self.__process_request(to_process, attribute_updates_command_config,
request_type='attributeUpdates')
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, server_rpc_request):
try:
if server_rpc_request.get(DEVICE_SECTION_PARAMETER) is not None:
log.debug("Modbus connector received rpc request for %s with server_rpc_request: %s",
server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request)
device = tuple(
filter(
lambda slave: slave.name == server_rpc_request[DEVICE_SECTION_PARAMETER], self.__slaves
)
)[0]
if isinstance(device.config[RPC_SECTION], dict):
rpc_command_config = device.config[RPC_SECTION].get(
server_rpc_request[DATA_PARAMETER][RPC_METHOD_PARAMETER])
if rpc_command_config is not None:
self.__process_request(server_rpc_request, rpc_command_config)
elif isinstance(device.config[RPC_SECTION], list):
for rpc_command_config in device.config[RPC_SECTION]:
if rpc_command_config[TAG_PARAMETER] == server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]:
self.__process_request(server_rpc_request, rpc_command_config)
break
else:
log.error("Received rpc request, but method %s not found in config for %s.",
server_rpc_request[DATA_PARAMETER].get(RPC_METHOD_PARAMETER),
self.get_name())
self.__gateway.send_rpc_reply(server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request[DATA_PARAMETER][RPC_ID_PARAMETER],
{server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]: "METHOD NOT FOUND!"})
else:
log.debug("Received RPC to connector: %r", server_rpc_request)
except Exception as e:
log.exception(e)
def __process_request(self, content, rpc_command_config, request_type='RPC'):
log.debug('Processing %s request', request_type)
if rpc_command_config is not None:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
rpc_command_config[UNIT_ID_PARAMETER] = device.config['unitId']
rpc_command_config[BYTE_ORDER_PARAMETER] = device.config.get("byteOrder", "LITTLE")
rpc_command_config[WORD_ORDER_PARAMETER] = device.config.get("wordOrder", "LITTLE")
self.__connect_to_current_master(device)
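            # For write requests (function codes 5, 6, 15 and 16) the payload is first
            # converted with the device's downlink converter before it is sent to the slave.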
if rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (6, 16):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
try:
rpc_command_config[PAYLOAD_PARAMETER] = converted_data[0]
                except (IndexError, TypeError):
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
elif rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (5, 15):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
try:
response = self.__function_to_device(device, rpc_command_config)
except Exception as e:
log.exception(e)
response = e
if isinstance(response, (ReadRegistersResponseBase, ReadBitsResponseBase)):
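                # Read responses are decoded with the uplink converter so the reply
                # carries the converted register values rather than the raw response object.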
to_converter = {
RPC_SECTION: {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: {"data_sent": rpc_command_config,
"input_data": response}}}
response = device.config[
UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config={**device.config,
BYTE_ORDER_PARAMETER: device.byte_order,
WORD_ORDER_PARAMETER: device.word_order
},
data=to_converter)
log.debug("Received %s method: %s, result: %r", request_type,
content[DATA_PARAMETER][RPC_METHOD_PARAMETER],
response)
elif isinstance(response, (WriteMultipleRegistersResponse,
WriteMultipleCoilsResponse,
WriteSingleCoilResponse,
WriteSingleRegisterResponse)):
log.debug("Write %r", str(response))
response = {"success": True}
if content.get(RPC_ID_PARAMETER) or (
content.get(DATA_PARAMETER) is not None and content[DATA_PARAMETER].get(RPC_ID_PARAMETER)):
if isinstance(response, Exception):
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
{content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: str(response)})
else:
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
response)
log.debug("%r", response)
| <filename>thingsboard_gateway/connectors/modbus/modbus_connector.py
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from time import sleep, time
from queue import Queue
from random import choice
from string import ascii_lowercase
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
# Try import Pymodbus library or install it and import
try:
from pymodbus.constants import Defaults
except ImportError:
print("Modbus library not found - installing...")
TBUtility.install_package("pymodbus", ">=2.3.0")
TBUtility.install_package('pyserial')
from pymodbus.constants import Defaults
try:
from twisted.internet import reactor
except ImportError:
TBUtility.install_package('twisted')
from twisted.internet import reactor
from twisted.internet import reactor
from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse
from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse
from pymodbus.register_read_message import ReadRegistersResponseBase
from pymodbus.bit_read_message import ReadBitsResponseBase
from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient
from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.exceptions import ConnectionException
from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.version import version
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.datastore import ModbusSparseDataBlock
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.modbus.constants import *
from thingsboard_gateway.connectors.modbus.slave import Slave
from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter
from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter
CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER]
FRAMER_TYPE = {
'rtu': ModbusRtuFramer,
'socket': ModbusSocketFramer,
'ascii': ModbusAsciiFramer
}
SLAVE_TYPE = {
'tcp': StartTcpServer,
'udp': StartUdpServer,
'serial': StartSerialServer
}
FUNCTION_TYPE = {
'coils_initializer': 'co',
'holding_registers': 'hr',
'input_registers': 'ir',
'discrete_inputs': 'di'
}
FUNCTION_CODE_WRITE = {
'holding_registers': (6, 16),
'coils_initializer': (5, 15)
}
FUNCTION_CODE_READ = {
'holding_registers': 3,
'coils_initializer': 1,
'input_registers': 4,
'discrete_inputs': 2
}
class ModbusConnector(Connector, Thread):
process_requests = Queue(-1)
def __init__(self, gateway, config, connector_type):
self.statistics = {STATISTIC_MESSAGE_RECEIVED_PARAMETER: 0,
STATISTIC_MESSAGE_SENT_PARAMETER: 0}
super().__init__()
self.__gateway = gateway
self._connector_type = connector_type
self.__backward_compatibility_adapter = BackwardCompatibilityAdapter(config, gateway.get_config_path())
self.__config = self.__backward_compatibility_adapter.convert()
self.setName(self.__config.get("name", 'Modbus Default ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
self.__connected = False
self.__stopped = False
self.daemon = True
if self.__config.get('slave'):
self.__slave_thread = Thread(target=self.__configure_and_run_slave, args=(self.__config['slave'],),
daemon=True, name='Gateway as a slave')
self.__slave_thread.start()
if config['slave'].get('sendDataToThingsBoard', False):
self.__modify_main_config()
self.__slaves = []
self.__load_slaves()
def is_connected(self):
return self.__connected
def open(self):
self.__stopped = False
self.start()
def run(self):
self.__connected = True
while True:
if not self.__stopped and not ModbusConnector.process_requests.empty():
thread = Thread(target=self.__process_slaves, daemon=True)
thread.start()
if self.__stopped:
break
sleep(.2)
@staticmethod
def __configure_and_run_slave(config):
identity = None
if config.get('identity'):
identity = ModbusDeviceIdentification()
identity.VendorName = config['identity'].get('vendorName', '')
identity.ProductCode = config['identity'].get('productCode', '')
identity.VendorUrl = config['identity'].get('vendorUrl', '')
identity.ProductName = config['identity'].get('productName', '')
identity.ModelName = config['identity'].get('ModelName', '')
identity.MajorMinorRevision = version.short()
blocks = {}
for (key, value) in config.get('values').items():
values = {}
converter = BytesModbusDownlinkConverter({})
for item in value:
for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
for val in item.get(section, []):
function_code = FUNCTION_CODE_WRITE[key][0] if val['objectsCount'] <= 1 else \
FUNCTION_CODE_WRITE[key][1]
converted_value = converter.convert(
{**val,
'device': config.get('deviceName', 'Gateway'), 'functionCode': function_code,
'byteOrder': config['byteOrder'], 'wordOrder': config['wordOrder']},
{'data': {'params': val['value']}})
values[val['address'] + 1] = converted_value
blocks[FUNCTION_TYPE[key]] = ModbusSparseDataBlock(values)
context = ModbusServerContext(slaves=ModbusSlaveContext(**blocks), single=True)
SLAVE_TYPE[config['type']](context, identity=identity,
address=(config.get('host'), config.get('port')) if (
                                           config['type'] in ('tcp', 'udp')) else None,
port=config.get('port') if config['type'] == 'serial' else None,
framer=FRAMER_TYPE[config['method']])
def __modify_main_config(self):
config = self.__config['slave']
values = config.pop('values')
device = config
for (register, reg_values) in values.items():
for value in reg_values:
for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
if not device.get(section):
device[section] = []
for item in value.get(section, []):
device[section].append({**item, 'functionCode': FUNCTION_CODE_READ[
register] if section not in ('attributeUpdates', 'rpc') else item['functionCode']})
self.__config['master']['slaves'].append(device)
def __load_slaves(self):
self.__slaves = [
Slave(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback': ModbusConnector.callback}) for
device in self.__config.get('master', {'slaves': []}).get('slaves', [])]
@classmethod
def callback(cls, slave):
cls.process_requests.put(slave)
@property
def connector_type(self):
return self._connector_type
def __convert_and_save_data(self, config_tuple):
device, current_device_config, config, device_responses = config_tuple
converted_data = {}
try:
converted_data = device.config[UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config=config,
data=device_responses)
except Exception as e:
log.error(e)
to_send = {DEVICE_NAME_PARAMETER: converted_data[DEVICE_NAME_PARAMETER],
DEVICE_TYPE_PARAMETER: converted_data[DEVICE_TYPE_PARAMETER],
TELEMETRY_PARAMETER: [],
ATTRIBUTES_PARAMETER: []
}
if current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
for current_section_dict in converted_data[converted_data_section]:
for key, value in current_section_dict.items():
if device.config[LAST_PREFIX + converted_data_section].get(key) is None or \
device.config[LAST_PREFIX + converted_data_section][key] != value:
device.config[LAST_PREFIX + converted_data_section][key] = value
to_send[converted_data_section].append({key: value})
elif converted_data and current_device_config.get('sendDataOnlyOnChange') is None or \
not current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
device.config[LAST_PREFIX + converted_data_section] = converted_data[
converted_data_section]
to_send[converted_data_section] = converted_data[converted_data_section]
if to_send.get(ATTRIBUTES_PARAMETER) or to_send.get(TELEMETRY_PARAMETER):
self.__gateway.send_to_storage(self.get_name(), to_send)
self.statistics[STATISTIC_MESSAGE_SENT_PARAMETER] += 1
def close(self):
self.__stopped = True
self.__stop_connections_to_masters()
if reactor.running:
StopServer()
log.info('%s has been stopped.', self.get_name())
def get_name(self):
return self.name
def __process_slaves(self):
# TODO: write documentation
device = ModbusConnector.process_requests.get()
device_responses = {'timeseries': {}, 'attributes': {}}
current_device_config = {}
try:
for config_section in device_responses:
if device.config.get(config_section) is not None:
current_device_config = device.config
self.__connect_to_current_master(device)
if not device.config['master'].is_socket_open() or not len(
current_device_config[config_section]):
continue
# Reading data from device
for interested_data in range(len(current_device_config[config_section])):
current_data = current_device_config[config_section][interested_data]
current_data[DEVICE_NAME_PARAMETER] = device
input_data = self.__function_to_device(device, current_data)
device_responses[config_section][current_data[TAG_PARAMETER]] = {
"data_sent": current_data,
"input_data": input_data}
log.debug("Checking %s for device %s", config_section, device)
                    log.debug('Device response: %s', device_responses)
if device_responses.get('timeseries') or device_responses.get('attributes'):
self.__convert_and_save_data((device, current_device_config, {
**current_device_config,
BYTE_ORDER_PARAMETER: current_device_config.get(BYTE_ORDER_PARAMETER,
device.byte_order),
WORD_ORDER_PARAMETER: current_device_config.get(WORD_ORDER_PARAMETER,
device.word_order)
}, device_responses))
except ConnectionException:
sleep(5)
log.error("Connection lost! Reconnecting...")
except Exception as e:
log.exception(e)
def __connect_to_current_master(self, device=None):
# TODO: write documentation
connect_attempt_count = 5
connect_attempt_time_ms = 100
wait_after_failed_attempts_ms = 300000
if device.config.get('master') is None:
device.config['master'], device.config['available_functions'] = self.__configure_master(device.config)
if connect_attempt_count < 1:
connect_attempt_count = 1
connect_attempt_time_ms = device.config.get('connectAttemptTimeMs', connect_attempt_time_ms)
if connect_attempt_time_ms < 500:
connect_attempt_time_ms = 500
wait_after_failed_attempts_ms = device.config.get('waitAfterFailedAttemptsMs', wait_after_failed_attempts_ms)
if wait_after_failed_attempts_ms < 1000:
wait_after_failed_attempts_ms = 1000
current_time = time() * 1000
if not device.config['master'].is_socket_open():
if device.config['connection_attempt'] >= connect_attempt_count and current_time - device.config[
'last_connection_attempt_time'] >= wait_after_failed_attempts_ms:
device.config['connection_attempt'] = 0
while not device.config['master'].is_socket_open() \
and device.config['connection_attempt'] < connect_attempt_count \
and current_time - device.config.get('last_connection_attempt_time',
0) >= connect_attempt_time_ms:
device.config['connection_attempt'] = device.config[
'connection_attempt'] + 1
device.config['last_connection_attempt_time'] = current_time
log.debug("Modbus trying connect to %s", device)
device.config['master'].connect()
if device.config['connection_attempt'] == connect_attempt_count:
                    log.warning("Maximum attempt count (%i) for device \"%s\" - encountered.", connect_attempt_count,
                                device)
if device.config['connection_attempt'] >= 0 and device.config['master'].is_socket_open():
device.config['connection_attempt'] = 0
device.config['last_connection_attempt_time'] = current_time
@staticmethod
def __configure_master(config):
current_config = config
current_config["rtu"] = FRAMER_TYPE[current_config['method']]
if current_config.get('type') == 'tcp':
master = ModbusTcpClient(current_config["host"],
current_config["port"],
current_config["rtu"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"])
elif current_config.get(TYPE_PARAMETER) == 'udp':
master = ModbusUdpClient(current_config["host"],
current_config["port"],
current_config["rtu"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"])
elif current_config.get(TYPE_PARAMETER) == 'serial':
master = ModbusSerialClient(method=current_config["method"],
port=current_config["port"],
timeout=current_config["timeout"],
retry_on_empty=current_config["retry_on_empty"],
retry_on_invalid=current_config["retry_on_invalid"],
retries=current_config["retries"],
baudrate=current_config["baudrate"],
stopbits=current_config["stopbits"],
bytesize=current_config["bytesize"],
parity=current_config["parity"],
strict=current_config["strict"])
else:
raise Exception("Invalid Modbus transport type.")
available_functions = {
1: master.read_coils,
2: master.read_discrete_inputs,
3: master.read_holding_registers,
4: master.read_input_registers,
5: master.write_coil,
6: master.write_register,
15: master.write_coils,
16: master.write_registers,
}
return master, available_functions
def __stop_connections_to_masters(self):
for slave in self.__slaves:
if slave.config.get('master') is not None and slave.config.get('master').is_socket_open():
slave.config['master'].close()
@staticmethod
def __function_to_device(device, config):
function_code = config.get('functionCode')
result = None
if function_code == 1:
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))) * 8,
unit=device.config['unitId'])
elif function_code in (2, 3, 4):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))),
unit=device.config['unitId'])
elif function_code in (5, 15):
            result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
                                                                          value=config[PAYLOAD_PARAMETER],
                                                                          unit=device.config['unitId'])
elif function_code in (6, 16):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
values=config[PAYLOAD_PARAMETER],
unit=device.config['unitId'])
else:
log.error("Unknown Modbus function with code: %s", function_code)
log.debug("With result %s", str(result))
if "Exception" in str(result):
log.exception(result)
return result
def on_attributes_update(self, content):
try:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
for attribute_updates_command_config in device.config['attributeUpdates']:
for attribute_updated in content[DATA_PARAMETER]:
if attribute_updates_command_config[TAG_PARAMETER] == attribute_updated:
to_process = {
DEVICE_SECTION_PARAMETER: content[DEVICE_SECTION_PARAMETER],
DATA_PARAMETER: {
RPC_METHOD_PARAMETER: attribute_updated,
RPC_PARAMS_PARAMETER: content[DATA_PARAMETER][attribute_updated]
}
}
attribute_updates_command_config['byteOrder'] = device.byte_order or 'LITTLE'
attribute_updates_command_config['wordOrder'] = device.word_order or 'LITTLE'
self.__process_request(to_process, attribute_updates_command_config,
request_type='attributeUpdates')
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, server_rpc_request):
try:
if server_rpc_request.get(DEVICE_SECTION_PARAMETER) is not None:
log.debug("Modbus connector received rpc request for %s with server_rpc_request: %s",
server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request)
device = tuple(
filter(
lambda slave: slave.name == server_rpc_request[DEVICE_SECTION_PARAMETER], self.__slaves
)
)[0]
if isinstance(device.config[RPC_SECTION], dict):
rpc_command_config = device.config[RPC_SECTION].get(
server_rpc_request[DATA_PARAMETER][RPC_METHOD_PARAMETER])
if rpc_command_config is not None:
self.__process_request(server_rpc_request, rpc_command_config)
elif isinstance(device.config[RPC_SECTION], list):
for rpc_command_config in device.config[RPC_SECTION]:
if rpc_command_config[TAG_PARAMETER] == server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]:
self.__process_request(server_rpc_request, rpc_command_config)
break
else:
log.error("Received rpc request, but method %s not found in config for %s.",
server_rpc_request[DATA_PARAMETER].get(RPC_METHOD_PARAMETER),
self.get_name())
self.__gateway.send_rpc_reply(server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request[DATA_PARAMETER][RPC_ID_PARAMETER],
{server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]: "METHOD NOT FOUND!"})
else:
log.debug("Received RPC to connector: %r", server_rpc_request)
except Exception as e:
log.exception(e)
def __process_request(self, content, rpc_command_config, request_type='RPC'):
log.debug('Processing %s request', request_type)
if rpc_command_config is not None:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
rpc_command_config[UNIT_ID_PARAMETER] = device.config['unitId']
rpc_command_config[BYTE_ORDER_PARAMETER] = device.config.get("byteOrder", "LITTLE")
rpc_command_config[WORD_ORDER_PARAMETER] = device.config.get("wordOrder", "LITTLE")
self.__connect_to_current_master(device)
if rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (6, 16):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
try:
rpc_command_config[PAYLOAD_PARAMETER] = converted_data[0]
                except (IndexError, TypeError):
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
elif rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (5, 15):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
try:
response = self.__function_to_device(device, rpc_command_config)
except Exception as e:
log.exception(e)
response = e
if isinstance(response, (ReadRegistersResponseBase, ReadBitsResponseBase)):
to_converter = {
RPC_SECTION: {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: {"data_sent": rpc_command_config,
"input_data": response}}}
response = device.config[
UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config={**device.config,
BYTE_ORDER_PARAMETER: device.byte_order,
WORD_ORDER_PARAMETER: device.word_order
},
data=to_converter)
log.debug("Received %s method: %s, result: %r", request_type,
content[DATA_PARAMETER][RPC_METHOD_PARAMETER],
response)
elif isinstance(response, (WriteMultipleRegistersResponse,
WriteMultipleCoilsResponse,
WriteSingleCoilResponse,
WriteSingleRegisterResponse)):
log.debug("Write %r", str(response))
response = {"success": True}
if content.get(RPC_ID_PARAMETER) or (
content.get(DATA_PARAMETER) is not None and content[DATA_PARAMETER].get(RPC_ID_PARAMETER)):
if isinstance(response, Exception):
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
{content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: str(response)})
else:
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
response)
log.debug("%r", response)
| en | 0.781446 | # Copyright 2022. ThingsBoard # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Try import Pymodbus library or install it and import # TODO: write documentation # Reading data from device # TODO: write documentation | 1.949008 | 2 |
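The connector above funnels every read and write through a dispatch table keyed by Modbus function code (see __configure_master and __function_to_device). A minimal, self-contained sketch of that pattern follows; the stub handlers and config keys are illustrative stand-ins, not pymodbus calls.

# Sketch of the function-code dispatch pattern, with stub handlers.
def read_holding_registers(address, count, unit):
    return [0] * count  # placeholder response

def write_registers(address, values, unit):
    return {"address": address, "written": list(values), "unit": unit}

AVAILABLE_FUNCTIONS = {
    3: read_holding_registers,   # read holding registers
    16: write_registers,         # write multiple registers
}

def function_to_device(function_code, config, unit_id):
    handler = AVAILABLE_FUNCTIONS.get(function_code)
    if handler is None:
        raise ValueError("Unknown Modbus function code: %s" % function_code)
    if function_code in (1, 2, 3, 4):
        return handler(address=config["address"], count=config.get("objectsCount", 1), unit=unit_id)
    return handler(address=config["address"], values=config["payload"], unit=unit_id)

if __name__ == "__main__":
    print(function_to_device(3, {"address": 0, "objectsCount": 2}, unit_id=1))
    print(function_to_device(16, {"address": 0, "payload": [7, 8]}, unit_id=1))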
specs/test_gru_on_flat_babyai.py | xwu20/wmg_agent | 23 | 9412 | <reponame>xwu20/wmg_agent<filename>specs/test_gru_on_flat_babyai.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
### CONTROLS (non-tunable) ###
# general
TYPE_OF_RUN = test_episodes # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None
# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False
# A3cAgent
AGENT_NET = GRU_Network
# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True
### HYPERPARAMETERS (tunable) ###
# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.
# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
### CONTROLS (non-tunable) ###
# general
TYPE_OF_RUN = test_episodes # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None
# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False
# A3cAgent
AGENT_NET = GRU_Network
# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True
### HYPERPARAMETERS (tunable) ###
# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.
# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096 | en | 0.514611 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. ### CONTROLS (non-tunable) ### # general # train, test, test_episodes, render # worker.py # A3cAgent # BabyAI_Env ### HYPERPARAMETERS (tunable) ### # A3cAgent # RNNs | 1.254636 | 1 |
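The spec above is a flat file of KEY = value assignments with # comments, stored twice here (raw and cleaned). A hypothetical loader for that format, not the actual wmg_agent parser, could be as small as:

def load_spec(path):
    """Parse flat 'KEY = value' spec lines, ignoring blanks and # comments."""
    spec = {}
    with open(path) as handle:
        for line in handle:
            line = line.split("#", 1)[0].strip()  # drop trailing comments
            if not line or "=" not in line:
                continue
            key, value = (part.strip() for part in line.split("=", 1))
            spec[key] = value
    return spec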
haskell/private/actions/runghc.bzl | meisterT/rules_haskell | 0 | 9413 | <gh_stars>0
"""runghc support"""
load(":private/context.bzl", "render_env")
load(":private/packages.bzl", "expose_packages", "pkg_info_to_compile_flags")
load(
":private/path_utils.bzl",
"link_libraries",
"ln",
"target_unique_name",
)
load(
":private/set.bzl",
"set",
)
load(":providers.bzl", "get_ghci_extra_libs")
load("@bazel_skylib//lib:shell.bzl", "shell")
def build_haskell_runghc(
hs,
runghc_wrapper,
user_compile_flags,
extra_args,
hs_info,
cc_info,
output,
package_databases,
version,
lib_info = None):
"""Build runghc script.
Args:
hs: Haskell context.
hs_info: HaskellInfo.
package_databases: package caches excluding the cache file of the package
we're creating a runghc for.
lib_info: If we're building runghc for a library target, pass
HaskellLibraryInfo here, otherwise it should be None.
Returns:
None.
"""
(pkg_info_inputs, args) = pkg_info_to_compile_flags(
hs,
pkg_info = expose_packages(
package_ids = hs.package_ids,
package_databases = package_databases,
version = version,
),
prefix = "runghc-",
)
if lib_info != None:
for idir in set.to_list(hs_info.import_dirs):
args += ["-i{0}".format(idir)]
(ghci_extra_libs, ghc_env) = get_ghci_extra_libs(
hs,
cc_info,
path_prefix = "$RULES_HASKELL_EXEC_ROOT",
)
link_libraries(ghci_extra_libs, args)
runghc_file = hs.actions.declare_file(target_unique_name(hs, "runghc"))
# Extra arguments.
# `compiler flags` is the default set of arguments for runghc,
# augmented by `extra_args`.
# The ordering is important, first compiler flags (from toolchain
# and local rule), then from `extra_args`. This way the more
# specific arguments are listed last, and then have more priority in
# GHC.
# Note that most flags for GHCI do have their negative value, so a
# negative flag in `extra_args` can disable a positive flag set
# in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable
# `-XOverloadedStrings`.
args += hs.toolchain.compiler_flags + user_compile_flags + hs.toolchain.repl_ghci_args
# ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc
runcompile_flags = ["--ghc-arg=%s" % a for a in args]
runcompile_flags += extra_args
hs.actions.expand_template(
template = runghc_wrapper,
output = runghc_file,
substitutions = {
"{ENV}": render_env(ghc_env),
"{TOOL}": hs.tools.runghc.path,
"{CC}": hs.toolchain.cc_wrapper.executable.path,
"{ARGS}": " ".join([shell.quote(a) for a in runcompile_flags]),
},
is_executable = True,
)
# XXX We create a symlink here because we need to force
# hs.tools.runghc and the best way to do that is
# to use hs.actions.run. That action, in turn must produce
# a result, so using ln seems to be the only sane choice.
extra_inputs = depset(transitive = [
depset([
hs.tools.runghc,
runghc_file,
]),
package_databases,
pkg_info_inputs,
ghci_extra_libs,
hs_info.source_files,
hs.toolchain.cc_wrapper.runfiles.files,
])
ln(hs, runghc_file, output, extra_inputs)
| """runghc support"""
load(":private/context.bzl", "render_env")
load(":private/packages.bzl", "expose_packages", "pkg_info_to_compile_flags")
load(
":private/path_utils.bzl",
"link_libraries",
"ln",
"target_unique_name",
)
load(
":private/set.bzl",
"set",
)
load(":providers.bzl", "get_ghci_extra_libs")
load("@bazel_skylib//lib:shell.bzl", "shell")
def build_haskell_runghc(
hs,
runghc_wrapper,
user_compile_flags,
extra_args,
hs_info,
cc_info,
output,
package_databases,
version,
lib_info = None):
"""Build runghc script.
Args:
hs: Haskell context.
hs_info: HaskellInfo.
package_databases: package caches excluding the cache file of the package
we're creating a runghc for.
lib_info: If we're building runghc for a library target, pass
HaskellLibraryInfo here, otherwise it should be None.
Returns:
None.
"""
(pkg_info_inputs, args) = pkg_info_to_compile_flags(
hs,
pkg_info = expose_packages(
package_ids = hs.package_ids,
package_databases = package_databases,
version = version,
),
prefix = "runghc-",
)
if lib_info != None:
for idir in set.to_list(hs_info.import_dirs):
args += ["-i{0}".format(idir)]
(ghci_extra_libs, ghc_env) = get_ghci_extra_libs(
hs,
cc_info,
path_prefix = "$RULES_HASKELL_EXEC_ROOT",
)
link_libraries(ghci_extra_libs, args)
runghc_file = hs.actions.declare_file(target_unique_name(hs, "runghc"))
# Extra arguments.
# `compiler flags` is the default set of arguments for runghc,
# augmented by `extra_args`.
# The ordering is important, first compiler flags (from toolchain
# and local rule), then from `extra_args`. This way the more
# specific arguments are listed last, and then have more priority in
# GHC.
# Note that most flags for GHCI do have their negative value, so a
# negative flag in `extra_args` can disable a positive flag set
# in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable
# `-XOverloadedStrings`.
args += hs.toolchain.compiler_flags + user_compile_flags + hs.toolchain.repl_ghci_args
# ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc
runcompile_flags = ["--ghc-arg=%s" % a for a in args]
runcompile_flags += extra_args
hs.actions.expand_template(
template = runghc_wrapper,
output = runghc_file,
substitutions = {
"{ENV}": render_env(ghc_env),
"{TOOL}": hs.tools.runghc.path,
"{CC}": hs.toolchain.cc_wrapper.executable.path,
"{ARGS}": " ".join([shell.quote(a) for a in runcompile_flags]),
},
is_executable = True,
)
# XXX We create a symlink here because we need to force
# hs.tools.runghc and the best way to do that is
# to use hs.actions.run. That action, in turn must produce
# a result, so using ln seems to be the only sane choice.
extra_inputs = depset(transitive = [
depset([
hs.tools.runghc,
runghc_file,
]),
package_databases,
pkg_info_inputs,
ghci_extra_libs,
hs_info.source_files,
hs.toolchain.cc_wrapper.runfiles.files,
])
ln(hs, runghc_file, output, extra_inputs) | en | 0.753155 | runghc support Build runghc script. Args: hs: Haskell context. hs_info: HaskellInfo. package_databases: package caches excluding the cache file of the package we're creating a runghc for. lib_info: If we're building runghc for a library target, pass HaskellLibraryInfo here, otherwise it should be None. Returns: None. # Extra arguments. # `compiler flags` is the default set of arguments for runghc, # augmented by `extra_args`. # The ordering is important, first compiler flags (from toolchain # and local rule), then from `extra_args`. This way the more # specific arguments are listed last, and then have more priority in # GHC. # Note that most flags for GHCI do have their negative value, so a # negative flag in `extra_args` can disable a positive flag set # in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable # `-XOverloadedStrings`. # ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc # XXX We create a symlink here because we need to force # hs.tools.runghc and the best way to do that is # to use hs.actions.run. That action, in turn must produce # a result, so using ln seems to be the only sane choice. | 2.051249 | 2 |
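The ordering comment in build_haskell_runghc is the important bit: toolchain flags come first and extra_args last, so later flags win inside GHC, and every GHC flag is wrapped as --ghc-arg=... before runghc sees it. The same transformation, sketched in Python purely for illustration:

def runghc_argv(compiler_flags, user_flags, extra_args):
    # Later flags take precedence in GHC, so user flags follow toolchain flags
    # and extra_args are appended last, unwrapped.
    ghc_flags = list(compiler_flags) + list(user_flags)
    return ["--ghc-arg=%s" % flag for flag in ghc_flags] + list(extra_args)

# runghc_argv(["-XOverloadedStrings"], ["-XNoOverloadedStrings"], ["Main.hs"])
# -> ['--ghc-arg=-XOverloadedStrings', '--ghc-arg=-XNoOverloadedStrings', 'Main.hs']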
tests/dicom/test_header_tweaks.py | pymedphys/pymedphys-archive-2019 | 1 | 9414 | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
import numpy as np
import pydicom
from pymedphys._dicom.create import dicom_dataset_from_dict
from pymedphys._dicom.header import (
RED_adjustment_map_from_structure_names,
adjust_machine_name,
adjust_RED_by_structure_name,
adjust_rel_elec_density,
)
from pymedphys._dicom.utilities import remove_file
HERE = os.path.dirname(__file__)
ORIGINAL_DICOM_FILENAME = os.path.join(
HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4()))
)
ADJUSTED_DICOM_FILENAME = os.path.join(
HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4()))
)
def compare_dicom_cli(command, original, expected):
pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)
try:
subprocess.check_call(command)
cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
assert str(cli_adjusted_ds) == str(expected)
finally:
remove_file(ORIGINAL_DICOM_FILENAME)
remove_file(ADJUSTED_DICOM_FILENAME)
def test_adjust_machine_name():
new_name = "new_name"
original_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": "hello"},
{"TreatmentMachineName": "george"},
]
}
)
expected_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": new_name},
{"TreatmentMachineName": new_name},
]
}
)
adjusted_ds = adjust_machine_name(original_ds, new_name)
assert adjusted_ds != original_ds
assert adjusted_ds == expected_ds
command = "pymedphys dicom adjust-machine-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
new_name,
]
compare_dicom_cli(command, original_ds, expected_ds)
def test_electron_density_append():
adjustment_map = {
"to_be_changed 1": 1.0,
"to_be_changed 2": 0.5,
"to_be_changed 3": 1.5,
}
excess_adjustment_map = {**adjustment_map, **{"this_structure_doesnt_exist": 1.0}}
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{"ROINumber": 1, "ROIName": "to_be_changed 1"},
{"ROINumber": 2, "ROIName": "dont_change_me"},
{"ROINumber": 10, "ROIName": "to_be_changed 2"},
{"ROINumber": 99, "ROIName": "to_be_changed 3"},
],
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
}
],
},
{"ReferencedROINumber": 2},
{"ReferencedROINumber": 10},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": 0,
}
],
},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
},
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 1"
],
},
],
},
{"ReferencedROINumber": 2},
{
"ReferencedROINumber": 10,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 2"
],
}
],
},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 3"
],
}
],
},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_rel_elec_density(original_ds, adjustment_map)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
adjusted_with_excess_ds = adjust_rel_elec_density(
original_ds, excess_adjustment_map, ignore_missing_structure=True
)
assert adjusted_with_excess_ds != original_ds
assert str(expected_ds) == str(adjusted_with_excess_ds)
excess_adjustment_map_as_list = [
["{}".format(key), item] for key, item in excess_adjustment_map.items()
]
excess_adjustment_map_flat = np.concatenate(excess_adjustment_map_as_list).tolist()
command = (
"pymedphys dicom adjust-RED -i ".split()
+ [ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME]
+ excess_adjustment_map_flat
)
compare_dicom_cli(command, original_ds, expected_ds)
def test_structure_name_parse():
structure_names = [
"a RED=1",
"b",
"c",
"d RED=2.2",
"e red = 3",
"f",
"g Red: 4.7",
"h RED=0.5 ",
]
expected_adjustment_map = {
"a RED=1": 1,
"d RED=2.2": 2.2,
"e red = 3": 3,
"g Red: 4.7": 4.7,
"h RED=0.5 ": 0.5,
}
adjustment_map = RED_adjustment_map_from_structure_names(structure_names)
assert expected_adjustment_map == adjustment_map
def test_structure_name_based_RED_append():
electron_density_to_use = 0.5
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{
"ROINumber": 1,
"ROIName": "a_structure RED={}".format(electron_density_to_use),
},
{"ROINumber": 2, "ROIName": "dont_change_me"},
],
"RTROIObservationsSequence": [
{"ReferencedROINumber": 1},
{"ReferencedROINumber": 2},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": electron_density_to_use,
}
],
},
{"ReferencedROINumber": 2},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_RED_by_structure_name(original_ds)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
command = "pymedphys dicom adjust-RED-by-structure-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
]
compare_dicom_cli(command, original_ds, expected_ds)
| # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
import numpy as np
import pydicom
from pymedphys._dicom.create import dicom_dataset_from_dict
from pymedphys._dicom.header import (
RED_adjustment_map_from_structure_names,
adjust_machine_name,
adjust_RED_by_structure_name,
adjust_rel_elec_density,
)
from pymedphys._dicom.utilities import remove_file
HERE = os.path.dirname(__file__)
ORIGINAL_DICOM_FILENAME = os.path.join(
HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4()))
)
ADJUSTED_DICOM_FILENAME = os.path.join(
HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4()))
)
def compare_dicom_cli(command, original, expected):
pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)
try:
subprocess.check_call(command)
cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
assert str(cli_adjusted_ds) == str(expected)
finally:
remove_file(ORIGINAL_DICOM_FILENAME)
remove_file(ADJUSTED_DICOM_FILENAME)
def test_adjust_machine_name():
new_name = "new_name"
original_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": "hello"},
{"TreatmentMachineName": "george"},
]
}
)
expected_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": new_name},
{"TreatmentMachineName": new_name},
]
}
)
adjusted_ds = adjust_machine_name(original_ds, new_name)
assert adjusted_ds != original_ds
assert adjusted_ds == expected_ds
command = "pymedphys dicom adjust-machine-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
new_name,
]
compare_dicom_cli(command, original_ds, expected_ds)
def test_electron_density_append():
adjustment_map = {
"to_be_changed 1": 1.0,
"to_be_changed 2": 0.5,
"to_be_changed 3": 1.5,
}
excess_adjustment_map = {**adjustment_map, **{"this_structure_doesnt_exist": 1.0}}
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{"ROINumber": 1, "ROIName": "to_be_changed 1"},
{"ROINumber": 2, "ROIName": "dont_change_me"},
{"ROINumber": 10, "ROIName": "to_be_changed 2"},
{"ROINumber": 99, "ROIName": "to_be_changed 3"},
],
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
}
],
},
{"ReferencedROINumber": 2},
{"ReferencedROINumber": 10},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": 0,
}
],
},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
},
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 1"
],
},
],
},
{"ReferencedROINumber": 2},
{
"ReferencedROINumber": 10,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 2"
],
}
],
},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 3"
],
}
],
},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_rel_elec_density(original_ds, adjustment_map)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
adjusted_with_excess_ds = adjust_rel_elec_density(
original_ds, excess_adjustment_map, ignore_missing_structure=True
)
assert adjusted_with_excess_ds != original_ds
assert str(expected_ds) == str(adjusted_with_excess_ds)
excess_adjustment_map_as_list = [
["{}".format(key), item] for key, item in excess_adjustment_map.items()
]
excess_adjustment_map_flat = np.concatenate(excess_adjustment_map_as_list).tolist()
command = (
"pymedphys dicom adjust-RED -i ".split()
+ [ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME]
+ excess_adjustment_map_flat
)
compare_dicom_cli(command, original_ds, expected_ds)
def test_structure_name_parse():
structure_names = [
"a RED=1",
"b",
"c",
"d RED=2.2",
"e red = 3",
"f",
"g Red: 4.7",
"h RED=0.5 ",
]
expected_adjustment_map = {
"a RED=1": 1,
"d RED=2.2": 2.2,
"e red = 3": 3,
"g Red: 4.7": 4.7,
"h RED=0.5 ": 0.5,
}
adjustment_map = RED_adjustment_map_from_structure_names(structure_names)
assert expected_adjustment_map == adjustment_map
def test_structure_name_based_RED_append():
electron_density_to_use = 0.5
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{
"ROINumber": 1,
"ROIName": "a_structure RED={}".format(electron_density_to_use),
},
{"ROINumber": 2, "ROIName": "dont_change_me"},
],
"RTROIObservationsSequence": [
{"ReferencedROINumber": 1},
{"ReferencedROINumber": 2},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": electron_density_to_use,
}
],
},
{"ReferencedROINumber": 2},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_RED_by_structure_name(original_ds)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
command = "pymedphys dicom adjust-RED-by-structure-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
]
compare_dicom_cli(command, original_ds, expected_ds)
| en | 0.852217 | # Copyright (C) 2019 Cancer Care Associates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.129616 | 2 |
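test_structure_name_parse above fixes the contract for RED_adjustment_map_from_structure_names: structure names carrying a RED=, red =, or Red: marker map to the parsed relative electron density, and everything else is skipped. A regex sketch that reproduces those expectations (illustrative only, not the pymedphys implementation):

import re

RED_PATTERN = re.compile(r"red\s*[=:]\s*(\d+(?:\.\d+)?)", re.IGNORECASE)

def red_map_from_structure_names(structure_names):
    adjustment_map = {}
    for name in structure_names:
        match = RED_PATTERN.search(name)
        if match:
            adjustment_map[name] = float(match.group(1))
    return adjustment_map

# red_map_from_structure_names(["a RED=1", "b", "g Red: 4.7"])
# -> {'a RED=1': 1.0, 'g Red: 4.7': 4.7}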
tests/components/http/test_data_validator.py | itewk/home-assistant | 23 | 9415 | <gh_stars>10-100
"""Test data validator decorator."""
from unittest.mock import Mock
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
async def get_client(aiohttp_client, validator):
"""Generate a client that hits a view decorated with validator."""
app = web.Application()
app["hass"] = Mock(is_running=True)
class TestView(HomeAssistantView):
url = "/"
name = "test"
requires_auth = False
@validator
async def post(self, request, data):
"""Test method."""
return b""
TestView().register(app, app.router)
client = await aiohttp_client(app)
return client
async def test_validator(aiohttp_client):
"""Test the validator."""
client = await get_client(
aiohttp_client, RequestDataValidator(vol.Schema({vol.Required("test"): str}))
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 400
async def test_validator_allow_empty(aiohttp_client):
"""Test the validator with empty data."""
client = await get_client(
aiohttp_client,
RequestDataValidator(
vol.Schema(
{
# Although we allow empty, our schema should still be able
# to validate an empty dict.
vol.Optional("test"): str
}
),
allow_empty=True,
),
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 200
| """Test data validator decorator."""
from unittest.mock import Mock
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
async def get_client(aiohttp_client, validator):
"""Generate a client that hits a view decorated with validator."""
app = web.Application()
app["hass"] = Mock(is_running=True)
class TestView(HomeAssistantView):
url = "/"
name = "test"
requires_auth = False
@validator
async def post(self, request, data):
"""Test method."""
return b""
TestView().register(app, app.router)
client = await aiohttp_client(app)
return client
async def test_validator(aiohttp_client):
"""Test the validator."""
client = await get_client(
aiohttp_client, RequestDataValidator(vol.Schema({vol.Required("test"): str}))
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 400
async def test_validator_allow_empty(aiohttp_client):
"""Test the validator with empty data."""
client = await get_client(
aiohttp_client,
RequestDataValidator(
vol.Schema(
{
# Although we allow empty, our schema should still be able
# to validate an empty dict.
vol.Optional("test"): str
}
),
allow_empty=True,
),
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 200 | en | 0.715168 | Test data validator decorator. Generate a client that hits a view decorated with validator. Test method. Test the validator. Test the validator with empty data. # Although we allow empty, our schema should still be able # to validate an empty dict. | 2.825898 | 3 |
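Both tests above pin down the same behaviour: the decorator reads the request body, validates it against a voluptuous schema, hands the validated dict to the handler, and answers 400 on anything invalid; empty bodies only pass when allow_empty is set. A simplified stand-alone sketch of that shape (the real RequestDataValidator in Home Assistant handles more cases):

import json
from functools import wraps

import voluptuous as vol
from aiohttp import web

def request_data_validator(schema, allow_empty=False):
    def decorator(handler):
        @wraps(handler)
        async def wrapper(view, request):
            raw = await request.text()
            if not raw and allow_empty:
                data = {}
            else:
                try:
                    data = json.loads(raw)
                except ValueError:
                    return web.json_response({"error": "invalid JSON"}, status=400)
            try:
                data = schema(data)
            except vol.Invalid as err:
                return web.json_response({"error": str(err)}, status=400)
            return await handler(view, request, data)
        return wrapper
    return decorator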
Conversely_Frontend/app/Server/ukjp/templates.py | sam-aldis/Conversley | 0 | 9416 | import days
STAGE_INIT = 0
STAGE_CHALLENGE_INIT = 1
STAGE_BOOKED = 2
def createJSONTemplate(data):
pass
messages = [
    "Hey {{first_name}}, thank you for your enquiry to be one of our Transformation Challengers",
"We have 2 Challenges available for you:\n\nThe 8 Week Bikini Challenge which helps you shed 3-9kg of unwanted body fat, flattens your tummy and tones your arms, abs, legs and butt.\n\nOr our 9in6 Challenge which helps you drop 9+kgs of pure fat in just 6 Weeks.",
"Please choose which challenge information you would like below..."
]
callbacks = {
"INIT_8WBC" : [
{
"type": "message",
"text" : "Thank you {{first_name}},\n\
The FREE 8 Week Bikini Challenge is a done for you - step by step PROVEN program that helps you lose the 3-7kg of unwanted body fat, flatten your tummy and tone your arms, legs and butt.\n\
\n\
This is your chance to transform your body in just 8 weeks for FREE"
},
{
"type" : "message",
"text" : "In exchange for the program being FREE....we ask that you allow us to share your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. \n\
(Please note, a small refundable deposit applies to keep you motivated throughout the 8 weeks)"
},
{
"type": "message",
"text": "The challenge is starting Monday 12th of June and to start your 8 Week Bikini Challenge, we just require you to attend the upcoming information meeting at the facility to quickly go over the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join. Simply a meet and chat.\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_8wbc"
}
],
"INIT_9IN6" : [
{
"type" : "message",
"text" : "Thank you {{first_name}},\n\
The 9in6 Transformation Challenge is a done for you - step by step PROVEN program that helps you lose 9kg kilos of unwanted body fat, flatten your tummy and tone your arms, legs and butt in just 6 weeks.\n\
\
\nThis is your chance to transform your body in just 6 weeks for FREE!"
},
{
"type" : "message",
"text" : "In exchange for the program, we ask that you allow us to showcase your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. When you complete the program its FREE. \n\
Please note, a small refundable \"incentive deposit\" applies to keep you motivated throughout the 6 weeks."
},
{
"type" : "message",
"text" : "The challenge is starting Monday 12th of June and to start your 9kg 6-week challenge, we require you to attend the upcoming information meeting where we explain the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join at the end, just an opportunity for you learn about the program and how you can lose 9kg in 6 weeks for FREE\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_9in6"
}
],
"TIME_TABLE_8WBC" : [
{
"type" : "message",
"text" : "Sure here's our lesson time table.."
},
{
"type" : "file",
"url" : "http://thetransformationcentre.com.au/img/timetable.pdf"
},
{
"type" : "json",
"template" : "init_8wbc"
}
]
}
def build_json_templates():
JSON_TEMPLATES = {
"init" :{
"template_type" : "generic",
"elements" : [
{
"title" : "The Transformation Centre",
"image_url" : "http://thetransformationcentre.com.au/img/spinner/1.png",
"subtitle":"Choose one of our Challenges below",
"buttons":[
{
"type":"postback",
"payload":"INIT_8WBC",
"title":"8 Week Bikini Challenge"
},{
"type":"postback",
"title":"9kg 6 Week Challenge",
"payload":"INIT_9IN6"
}
]
}
]
},
"init_8wbc" : {
"template_type" : "generic",
"elements" : [
                {
                    "title" : "8 Week Bikini Challenge Meeting",
                    "subtitle":"RSVP by clicking a suitable date below",
"buttons":[
# {
# "type":"postback",
# "payload":"BOOK_CONSULT_8WBC_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1],
# "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1]
# }
# },
{
"type":"postback",
"title": "Sat 10th June 09.45",
"payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945"
}
]
}
]
},
"init_9in6" : {
"template_type" : "generic",
"elements" : [
{
"title" : "9kg 6 Week Challenge Info Meeting",
"subtitle":"RSVP by clicking a suitable date below",
"buttons":[
# {
# "type":"postback",
# "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1],
# "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1]
# }
{
"type":"postback",
"title": "Sat 10th June 09.45",
"payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945"
}
# ,{
# "type":"postback",
# "title": days.getAppointmentDates(2)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[0] + " " + days.getAppointmentDates(2)[1],
# "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(2)[2] + "_DAY_" + days.getAppointmentDates(2)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[1]
# }
]
}
]
}
}
return JSON_TEMPLATES | import days
STAGE_INIT = 0
STAGE_CHALLENGE_INIT = 1
STAGE_BOOKED = 2
def createJSONTemplate(data):
pass
messages = [
    "Hey {{first_name}}, thank you for your enquiry to be one of our Transformation Challengers",
"We have 2 Challenges available for you:\n\nThe 8 Week Bikini Challenge which helps you shed 3-9kg of unwanted body fat, flattens your tummy and tones your arms, abs, legs and butt.\n\nOr our 9in6 Challenge which helps you drop 9+kgs of pure fat in just 6 Weeks.",
"Please choose which challenge information you would like below..."
]
callbacks = {
"INIT_8WBC" : [
{
"type": "message",
"text" : "Thank you {{first_name}},\n\
The FREE 8 Week Bikini Challenge is a done for you - step by step PROVEN program that helps you lose the 3-7kg of unwanted body fat, flatten your tummy and tone your arms, legs and butt.\n\
\n\
This is your chance to transform your body in just 8 weeks for FREE"
},
{
"type" : "message",
"text" : "In exchange for the program being FREE....we ask that you allow us to share your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. \n\
(Please note, a small refundable deposit applies to keep you motivated throughout the 8 weeks)"
},
{
"type": "message",
"text": "The challenge is starting Monday 12th of June and to start your 8 Week Bikini Challenge, we just require you to attend the upcoming information meeting at the facility to quickly go over the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join. Simply a meet and chat.\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_8wbc"
}
],
"INIT_9IN6" : [
{
"type" : "message",
"text" : "Thank you {{first_name}},\n\
The 9in6 Transformation Challenge is a done for you - step by step PROVEN program that helps you lose 9kg kilos of unwanted body fat, flatten your tummy and tone your arms, legs and butt in just 6 weeks.\n\
\
\nThis is your chance to transform your body in just 6 weeks for FREE!"
},
{
"type" : "message",
"text" : "In exchange for the program, we ask that you allow us to showcase your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. When you complete the program its FREE. \n\
Please note, a small refundable \"incentive deposit\" applies to keep you motivated throughout the 6 weeks."
},
{
"type" : "message",
"text" : "The challenge is starting Monday 12th of June and to start your 9kg 6-week challenge, we require you to attend the upcoming information meeting where we explain the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join at the end, just an opportunity for you learn about the program and how you can lose 9kg in 6 weeks for FREE\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_9in6"
}
],
"TIME_TABLE_8WBC" : [
{
"type" : "message",
"text" : "Sure here's our lesson time table.."
},
{
"type" : "file",
"url" : "http://thetransformationcentre.com.au/img/timetable.pdf"
},
{
"type" : "json",
"template" : "init_8wbc"
}
]
}
def build_json_templates():
JSON_TEMPLATES = {
"init" :{
"template_type" : "generic",
"elements" : [
{
"title" : "The Transformation Centre",
"image_url" : "http://thetransformationcentre.com.au/img/spinner/1.png",
"subtitle":"Choose one of our Challenges below",
"buttons":[
{
"type":"postback",
"payload":"INIT_8WBC",
"title":"8 Week Bikini Challenge"
},{
"type":"postback",
"title":"9kg 6 Week Challenge",
"payload":"INIT_9IN6"
}
]
}
]
},
"init_8wbc" : {
"template_type" : "generic",
"elements" : [
                {
                    "title" : "8 Week Bikini Challenge Meeting",
                    "subtitle":"RSVP by clicking a suitable date below",
"buttons":[
# {
# "type":"postback",
# "payload":"BOOK_CONSULT_8WBC_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1],
# "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1]
# }
# },
{
"type":"postback",
"title": "Sat 10th June 09.45",
"payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945"
}
]
}
]
},
"init_9in6" : {
"template_type" : "generic",
"elements" : [
{
"title" : "9kg 6 Week Challenge Info Meeting",
"subtitle":"RSVP by clicking a suitable date below",
"buttons":[
# {
# "type":"postback",
# "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1],
# "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1]
# }
{
"type":"postback",
"title": "Sat 10th June 09.45",
"payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945"
}
# ,{
# "type":"postback",
# "title": days.getAppointmentDates(2)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[0] + " " + days.getAppointmentDates(2)[1],
# "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(2)[2] + "_DAY_" + days.getAppointmentDates(2)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[1]
# }
]
}
]
}
}
return JSON_TEMPLATES | en | 0.655803 | # { # "type":"postback", # "payload":"BOOK_CONSULT_8WBC_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1], # "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1] # } # }, # { # "type":"postback", # "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1], # "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1] # } # ,{ # "type":"postback", # "title": days.getAppointmentDates(2)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[0] + " " + days.getAppointmentDates(2)[1], # "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(2)[2] + "_DAY_" + days.getAppointmentDates(2)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[1] # } | 2.185561 | 2 |
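Each callbacks entry above is an ordered list of steps: plain messages, a file attachment, or a reference into build_json_templates(). A hypothetical dispatcher consuming that structure might look like this; send_message and send_attachment are placeholder callables, not a real Messenger client:

def dispatch_payload(payload, first_name, callbacks, templates,
                     send_message, send_attachment):
    for step in callbacks.get(payload, []):
        if step["type"] == "message":
            send_message(step["text"].replace("{{first_name}}", first_name))
        elif step["type"] == "file":
            send_attachment(step["url"])
        elif step["type"] == "json":
            send_message(templates[step["template"]])

# dispatch_payload("INIT_8WBC", "Sam", callbacks, build_json_templates(), print, print)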
var/spack/repos/builtin/packages/pagmo2/package.py | jeanbez/spack | 0 | 9417 | <reponame>jeanbez/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Pagmo2(CMakePackage):
"""Parallel Global Multiobjective Optimizer (and its Python alter ego
PyGMO) is a C++ / Python platform to perform parallel computations of
optimisation tasks (global and local) via the asynchronous generalized
island model."""
homepage = "https://esa.github.io/pagmo2/"
url = "https://github.com/esa/pagmo2/archive/v2.18.0.tar.gz"
git = "https://github.com/esa/pagmo2.git"
maintainers = ['liuyangzhuan']
version('master', branch='master')
version('2.18.0', sha256='5ad40bf3aa91857a808d6b632d9e1020341a33f1a4115d7a2b78b78fd063ae31')
depends_on('boost+system+serialization+thread')
depends_on('intel-tbb')
depends_on('mpi')
depends_on('[email protected]:', type='build')
variant('shared', default=True, description='Build shared libraries')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
]
return args
| # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Pagmo2(CMakePackage):
"""Parallel Global Multiobjective Optimizer (and its Python alter ego
PyGMO) is a C++ / Python platform to perform parallel computations of
optimisation tasks (global and local) via the asynchronous generalized
island model."""
homepage = "https://esa.github.io/pagmo2/"
url = "https://github.com/esa/pagmo2/archive/v2.18.0.tar.gz"
git = "https://github.com/esa/pagmo2.git"
maintainers = ['liuyangzhuan']
version('master', branch='master')
version('2.18.0', sha256='5ad40bf3aa91857a808d6b632d9e1020341a33f1a4115d7a2b78b78fd063ae31')
depends_on('boost+system+serialization+thread')
depends_on('intel-tbb')
depends_on('mpi')
depends_on('[email protected]:', type='build')
variant('shared', default=True, description='Build shared libraries')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
]
return args | en | 0.726431 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Parallel Global Multiobjective Optimizer (and its Python alter ego PyGMO) is a C++ / Python platform to perform parallel computations of optimisation tasks (global and local) via the asynchronous generalized island model. | 1.471621 | 1 |
interferogram/sentinel/fetchCalES.py | earthobservatory/ariamh-pub | 4 | 9418 | #!/usr/bin/env python3
import os, sys, re, json, requests, datetime, tarfile, argparse
from pprint import pprint
import numpy as np
from utils.UrlUtils import UrlUtils
server = 'https://qc.sentinel1.eo.esa.int/'
cal_re = re.compile(r'S1\w_AUX_CAL')
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Fetch calibration auxiliary files ingested into HySDS')
parser.add_argument('-o', '--output', dest='outdir', type=str, default='.',
help='Path to output directory')
parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
help="Don't download anything; just output the URLs")
return parser.parse_args()
def download_file(url, outdir='.', session=None):
'''
Download file to specified directory.
'''
if session is None:
session = requests.session()
path = "%s.tgz" % os.path.join(outdir, os.path.basename(url))
print('Downloading URL: ', url)
request = session.get(url, stream=True, verify=False)
request.raise_for_status()
with open(path,'wb') as f:
for chunk in request.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return path
def untar_file(path, outdir):
'''
Extract aux cal files.
'''
if not tarfile.is_tarfile(path):
raise RuntimeError("%s is not a tarfile." % path)
with tarfile.open(path) as f:
f.extractall(outdir)
def get_active_ids(es_url):
"""Query for the active calibration IDs."""
query = {
"query":{
"bool":{
"must":[
{"term":{"_id": "S1_AUX_CAL_ACTIVE"}},
]
}
},
"sort":[ { "starttime": { "order": "desc" } } ]
}
es_index = "grq_*_s1-aux_cal_active"
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, es_index)
else:
search_url = '%s/%s/_search' % (es_url, es_index)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
#pprint(result)
total = result['hits']['total']
if total == 0:
raise RuntimeError("Failed to find S1_AUX_CAL_ACTIVE at %s." % search_url)
return result['hits']['hits'][0]['_source']['metadata']['active_ids']
else:
print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
print("returned: %s" % r.text, file=sys.stderr)
r.raise_for_status()
def get_cal_url(id, es_url):
"""Query for the active calibration url."""
query = {
"query":{
"bool":{
"must":[
{"term":{"_id": id}},
]
}
},
"fields": ["urls", "metadata.archive_filename"]
}
es_index = "grq_*_s1-aux_cal"
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, es_index)
else:
search_url = '%s/%s/_search' % (es_url, es_index)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
pprint(result)
total = result['hits']['total']
if total == 0:
raise RuntimeError("Failed to find %s at %s." % (id, search_url))
urls = result['hits']['hits'][0]['fields']['urls']
archive_fname = result['hits']['hits'][0]['fields']['metadata.archive_filename'][0]
url = [x for x in urls if x.startswith('http')][0]
#print(urls)
#print(url)
#print(archive_fname)
return os.path.join(url, archive_fname)
else:
print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
print("returned: %s" % r.text, file=sys.stderr)
r.raise_for_status()
def fetch(outdir, dry_run):
# get endpoint configurations
uu = UrlUtils()
es_url = uu.rest_url
# get active calibration ids
active_ids = get_active_ids(es_url)
print(active_ids)
# get urls for active calibration files
cal_urls = [get_cal_url(i, es_url) for i in active_ids]
print(cal_urls)
if len(cal_urls) == 0:
print('Failed to find calibration auxiliary files')
if dry_run: print('\n'.join(cal_urls))
else:
if not os.path.isdir(outdir): os.makedirs(outdir)
for cal_url in cal_urls:
try: cal_file = download_file(cal_url, outdir)
except:
print('Failed to download URL: ', cal_url)
raise
try: cal_dir = untar_file(cal_file, outdir)
except:
print('Failed to untar: ', cal_file)
raise
os.unlink(cal_file)
if __name__ == '__main__':
inps = cmdLineParse()
fetch(inps.outdir, inps.dry_run)
| #!/usr/bin/env python3
import os, sys, re, json, requests, datetime, tarfile, argparse
from pprint import pprint
import numpy as np
from utils.UrlUtils import UrlUtils
server = 'https://qc.sentinel1.eo.esa.int/'
cal_re = re.compile(r'S1\w_AUX_CAL')
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Fetch calibration auxiliary files ingested into HySDS')
parser.add_argument('-o', '--output', dest='outdir', type=str, default='.',
help='Path to output directory')
parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
help="Don't download anything; just output the URLs")
return parser.parse_args()
def download_file(url, outdir='.', session=None):
'''
Download file to specified directory.
'''
if session is None:
session = requests.session()
path = "%s.tgz" % os.path.join(outdir, os.path.basename(url))
print('Downloading URL: ', url)
request = session.get(url, stream=True, verify=False)
request.raise_for_status()
with open(path,'wb') as f:
for chunk in request.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return path
def untar_file(path, outdir):
'''
Extract aux cal files.
'''
if not tarfile.is_tarfile(path):
raise RuntimeError("%s is not a tarfile." % path)
with tarfile.open(path) as f:
f.extractall(outdir)
def get_active_ids(es_url):
"""Query for the active calibration IDs."""
query = {
"query":{
"bool":{
"must":[
{"term":{"_id": "S1_AUX_CAL_ACTIVE"}},
]
}
},
"sort":[ { "starttime": { "order": "desc" } } ]
}
es_index = "grq_*_s1-aux_cal_active"
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, es_index)
else:
search_url = '%s/%s/_search' % (es_url, es_index)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
#pprint(result)
total = result['hits']['total']
if total == 0:
raise RuntimeError("Failed to find S1_AUX_CAL_ACTIVE at %s." % search_url)
return result['hits']['hits'][0]['_source']['metadata']['active_ids']
else:
print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
print("returned: %s" % r.text, file=sys.stderr)
r.raise_for_status()
def get_cal_url(id, es_url):
"""Query for the active calibration url."""
query = {
"query":{
"bool":{
"must":[
{"term":{"_id": id}},
]
}
},
"fields": ["urls", "metadata.archive_filename"]
}
es_index = "grq_*_s1-aux_cal"
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, es_index)
else:
search_url = '%s/%s/_search' % (es_url, es_index)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
pprint(result)
total = result['hits']['total']
if total == 0:
raise RuntimeError("Failed to find %s at %s." % (id, search_url))
urls = result['hits']['hits'][0]['fields']['urls']
archive_fname = result['hits']['hits'][0]['fields']['metadata.archive_filename'][0]
url = [x for x in urls if x.startswith('http')][0]
#print(urls)
#print(url)
#print(archive_fname)
return os.path.join(url, archive_fname)
else:
print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
print("returned: %s" % r.text, file=sys.stderr)
r.raise_for_status()
def fetch(outdir, dry_run):
# get endpoint configurations
uu = UrlUtils()
es_url = uu.rest_url
# get active calibration ids
active_ids = get_active_ids(es_url)
print(active_ids)
# get urls for active calibration files
cal_urls = [get_cal_url(i, es_url) for i in active_ids]
print(cal_urls)
if len(cal_urls) == 0:
print('Failed to find calibration auxiliary files')
if dry_run: print('\n'.join(cal_urls))
else:
if not os.path.isdir(outdir): os.makedirs(outdir)
for cal_url in cal_urls:
try: cal_file = download_file(cal_url, outdir)
            except Exception:
print('Failed to download URL: ', cal_url)
raise
try: cal_dir = untar_file(cal_file, outdir)
            except Exception:
print('Failed to untar: ', cal_file)
raise
os.unlink(cal_file)
if __name__ == '__main__':
inps = cmdLineParse()
fetch(inps.outdir, inps.dry_run)
| en | 0.564127 | #!/usr/bin/env python3 Command line parser. Download file to specified directory. Extract aux cal files. Query for the active calibration IDs. #pprint(result) Query for the active calibration url. #print(urls) #print(url) #print(archive_fname) # get endpoint configurations # get active calibration ids # get urls for active calibration files | 2.735443 | 3 |
www/conservancy/urls.py | stain/conservancy-website | 0 | 9419 | # Copyright 2005-2008, <NAME>
# Copyright 2010, 2012 <NAME>
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
admin.autodiscover()
urlpatterns = [
url(r'^$', frontpage.view),
url(r'^sponsors$', frontpage.view),
url(r'^sponsors/$', sponsors.view),
url(r'^sponsors/index.html$', sponsors.view),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^feeds/blog/?$', feeds.BlogFeed()),
url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
url(r'^feeds/?$', feeds.view),
url(r'^news(/|$)', include('conservancy.apps.news.urls')),
url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
# formerly static templated things... (dirs with templates)
url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
url(r'^error', static_views.index),
url(r'^about', static_views.index),
url(r'^donate', static_views.index),
url(r'^copyleft-compliance', static_views.index,
{'fundraiser_sought' : 'vmware-match-0'}),
url(r'^projects', static_views.index),
url(r'^npoacct', static_views.index,
{'fundraiser_sought' : 'npoacct'}),
url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
url(r'^overview', static_views.index),
url(r'^privacy-policy', static_views.index),
url(r'^supporter', include('conservancy.apps.supporter.urls')),
url(r'^fundraiser_data', fundgoal_views.view),
]
| # Copyright 2005-2008, <NAME>
# Copyright 2010, 2012 <NAME>
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
admin.autodiscover()
urlpatterns = [
url(r'^$', frontpage.view),
url(r'^sponsors$', frontpage.view),
url(r'^sponsors/$', sponsors.view),
url(r'^sponsors/index.html$', sponsors.view),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^feeds/blog/?$', feeds.BlogFeed()),
url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
url(r'^feeds/?$', feeds.view),
url(r'^news(/|$)', include('conservancy.apps.news.urls')),
url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
# formerly static templated things... (dirs with templates)
url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
url(r'^error', static_views.index),
url(r'^about', static_views.index),
url(r'^donate', static_views.index),
url(r'^copyleft-compliance', static_views.index,
{'fundraiser_sought' : 'vmware-match-0'}),
url(r'^projects', static_views.index),
url(r'^npoacct', static_views.index,
{'fundraiser_sought' : 'npoacct'}),
url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
url(r'^overview', static_views.index),
url(r'^privacy-policy', static_views.index),
url(r'^supporter', include('conservancy.apps.supporter.urls')),
url(r'^fundraiser_data', fundgoal_views.view),
]
| en | 0.865558 | # Copyright 2005-2008, <NAME> # Copyright 2010, 2012 <NAME> # This software's license gives you freedom; you can copy, convey, # propagate, redistribute, modify and/or redistribute modified versions of # this program under the terms of the GNU Affero General Public License # (AGPL) as published by the Free Software Foundation (FSF), either # version 3 of the License, or (at your option) any later version of the # AGPL published by the FSF. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero # General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program in a file in the toplevel directory called # "AGPLv3". If not, see <http://www.gnu.org/licenses/>. # formerly static templated things... (dirs with templates) | 1.674073 | 2 |
graphene_spike_tests/acceptances/test_query.py | FabienArcellier/spike-graphene-flask | 1 | 9420 | import unittest
from unittest.mock import Mock
from graphene import Schema
from graphene.test import Client
from graphene_spike.query import Query
class MainTest(unittest.TestCase):
def setUp(self):
self.schema = Schema(query=Query)
        self.client = Client(self.schema)
def test_hello_should_work_without_argument(self):
# Assign
query_string = '{ hello }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 18 !"})
def test_hello_should_write_the_giving_name(self):
# Assign
query_string = '{ hello(name: "Fabien") }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello Fabien, you have 18 !"})
def test_hello_should_write_the_giving_age(self):
# Assign
query_string = '{ hello(age: 24) }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 24 !"})
def test_goodbye_should_giving_a_response(self):
# Assign
query_string = '{ goodbye }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"goodbye": "See ya!"})
| import unittest
from unittest.mock import Mock
from graphene import Schema
from graphene.test import Client
from graphene_spike.query import Query
class MainTest(unittest.TestCase):
def setUp(self):
self.schema = Schema(query=Query)
        self.client = Client(self.schema)
def test_hello_should_work_without_argument(self):
# Assign
query_string = '{ hello }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 18 !"})
def test_hello_should_write_the_giving_name(self):
# Assign
query_string = '{ hello(name: "Fabien") }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello Fabien, you have 18 !"})
def test_hello_should_write_the_giving_age(self):
# Assign
query_string = '{ hello(age: 24) }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 24 !"})
def test_goodbye_should_giving_a_response(self):
# Assign
query_string = '{ goodbye }'
# Acts
executed = self.client.execute(query_string)
# Assert
self.assertEqual(executed['data'], {"goodbye": "See ya!"})
| en | 0.563217 | # Assign # Acts # Assert # Assign # Acts # Assert # Assign # Acts # Assert # Assign # Acts # Assert | 2.928337 | 3 |
clikan.py | davidventasmarin/clikan | 0 | 9421 | <gh_stars>0
from rich import print
from rich.console import Console
from rich.table import Table
import click
from click_default_group import DefaultGroup
import yaml
import os
##from terminaltables import SingleTable
import sys
from textwrap import wrap
import collections
import datetime
import configparser
import pkg_resources # part of setuptools
VERSION = pkg_resources.require("clikan")[0].version
class Config(object):
"""The config in this example only holds aliases."""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items('aliases'))
except configparser.NoSectionError:
pass
pass_config = click.make_pass_decorator(Config, ensure=True)
class AliasedGroup(DefaultGroup):
"""This subclass of a group supports looking up aliases in a config
file and with a bit of magic.
"""
def get_command(self, ctx, cmd_name):
        # Step one: builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# Step two: find the config object and ensure it's there. This
        # will create the config object if missing.
cfg = ctx.ensure_object(Config)
        # Step three: look up an explicit command alias in the config
if cmd_name in cfg.aliases:
actual_cmd = cfg.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
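# Illustrative alias file (an assumed aliases.ini next to this module, not part of this
# snippet):
#   [aliases]
#   rm = delete
# With that file, "clikan rm 3" dispatches to the delete command, and an unambiguous
# prefix such as "clikan pro 3" resolves to promote via the abbreviation fallback above.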
def read_config(ctx, param, value):
"""Callback that is used whenever --config is passed. We use this to
always load the correct config. This means that the config is loaded
even if the group itself never executes so our aliases stay always
available.
"""
cfg = ctx.ensure_object(Config)
if value is None:
value = os.path.join(os.path.dirname(__file__), 'aliases.ini')
cfg.read_config(value)
return value
@click.version_option(VERSION)
@click.command(cls=AliasedGroup, default='show', default_if_no_args=True)
def clikan():
"""clikan: CLI personal kanban """
@clikan.command()
def configure():
"""Place default config file in CLIKAN_HOME or HOME"""
home = get_clikan_home()
data_path = os.path.join(home, ".clikan.dat")
config_path = os.path.join(home, ".clikan.yaml")
if (os.path.exists(config_path) and not
click.confirm('Config file exists. Do you want to overwrite?')):
return
with open(config_path, 'w') as outfile:
conf = {'clikan_data': data_path}
yaml.dump(conf, outfile, default_flow_style=False)
click.echo("Creating %s" % config_path)
@clikan.command()
@click.argument('task')
def add(task):
"""Add a task in todo"""
if len(task) > 40:
click.echo('Task must be shorter than 40 chars. Brevity counts.')
else:
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
if ('limits' in config and 'todo' in config['limits'] and
int(config['limits']['todo']) <= len(todos)):
click.echo('No new todos, limit reached already.')
else:
od = collections.OrderedDict(sorted(dd['data'].items()))
new_id = 1
if bool(od):
new_id = next(reversed(od)) + 1
entry = ['todo', task, timestamp(), timestamp()]
dd['data'].update({new_id: entry})
click.echo("Creating new task w/ id: %d -> %s" % (new_id, task))
write_data(config, dd)
@clikan.command()
@click.argument('id')
def delete(id):
"""Delete task"""
config = read_config_yaml()
dd = read_data(config)
item = dd['data'].get(int(id))
if item is None:
click.echo('No existing task with that id.')
else:
item[0] = 'deleted'
item[2] = timestamp()
dd['deleted'].update({int(id): item})
dd['data'].pop(int(id))
write_data(config, dd)
click.echo('Removed task %d.' % int(id))
@clikan.command()
@click.argument('id')
def promote(id):
"""Promote task"""
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
item = dd['data'].get(int(id))
if item[0] == 'todo':
if ('limits' in config and 'wip' in config['limits'] and
int(config['limits']['wip']) <= len(inprogs)):
click.echo('No new tasks, limit reached already.')
else:
click.echo('Promoting task %s to in-progress.' % id)
dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
write_data(config, dd)
elif item[0] == 'inprogress':
click.echo('Promoting task %s to done.' % id)
dd['data'][int(id)] = ['done', item[1], timestamp(), item[3]]
write_data(config, dd)
else:
click.echo('Already done, can not promote %s' % id)
@clikan.command()
@click.argument('id')
def regress(id):
"""Regress task"""
config = read_config_yaml()
dd = read_data(config)
item = dd['data'].get(int(id))
if item[0] == 'done':
click.echo('Regressing task %s to in-progress.' % id)
dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
write_data(config, dd)
elif item[0] == 'inprogress':
click.echo('Regressing task %s to todo.' % id)
dd['data'][int(id)] = ['todo', item[1], timestamp(), item[3]]
write_data(config, dd)
else:
click.echo('Already in todo, can not regress %s' % id)
@clikan.command()
def show():
    """Show tasks in clikan"""
    console = Console()
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
if 'limits' in config and 'done' in config['limits']:
dones = dones[0:int(config['limits']['done'])]
else:
dones = dones[0:10]
todos = '\n'.join([str(x) for x in todos])
inprogs = '\n'.join([str(x) for x in inprogs])
dones = '\n'.join([str(x) for x in dones])
# td = [
# ['todo', 'in-progress', '[bold magenta]done[/bold magenta]'],
# ['', '', ''],
# ]
#table = SingleTable(td, 'clikan v.{}'.format(VERSION))
# table.inner_heading_row_border = False
# table.inner_row_border = True
# table.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
table = Table(show_header=True, show_footer=True)
table.add_column("[bold yellow]todo[/bold yellow]", no_wrap=True, footer="clikan")
table.add_column('[bold green]in-progress[/bold green]', no_wrap=True)
table.add_column('[bold magenta]done[/bold magenta]', no_wrap=True, footer="v.{}".format(VERSION))
# def wrap_lines(lines, column_index):
# max_width = table.column_max_width(column_index)
# packed = [line for line in lines if line.strip() != '']
# wrapped = [wrap(line, max_width, break_long_words=False,
# replace_whitespace=False) for line in packed]
# return '\n'.join(['\n'.join(w) for w in wrapped])
# for index, section in enumerate((todos, inprogs, dones)):
# table.table_data[1][index] = wrap_lines(section.splitlines(), index)
table.add_row(todos, inprogs, dones)
console.print(table)
#print(table.table)
def read_data(config):
"""Read the existing data from the config datasource"""
try:
with open(config["clikan_data"], 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print("Ensure %s exists, as you specified it "
"as the clikan data file." % config['clikan_data'])
print(exc)
except IOError:
click.echo("No data, initializing data file.")
write_data(config, {"data": {}, "deleted": {}})
with open(config["clikan_data"], 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
def write_data(config, data):
"""Write the data to the config datasource"""
with open(config["clikan_data"], 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def get_clikan_home():
home = os.environ.get('CLIKAN_HOME')
if not home:
home = os.path.expanduser('~')
return home
def read_config_yaml():
"""Read the app config from ~/.clikan.yaml"""
try:
home = get_clikan_home()
with open(home + "/.clikan.yaml", 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError:
print("Ensure %s/.clikan.yaml is valid, expected YAML." % home)
sys.exit()
except IOError:
print("Ensure %s/.clikan.yaml exists and is valid." % home)
sys.exit()
def split_items(config, dd):
todos = []
inprogs = []
dones = []
for key, value in dd['data'].items():
if value[0] == 'todo':
todos.append("[%d] %s" % (key, value[1]))
elif value[0] == 'inprogress':
inprogs.append("[%d] %s" % (key, value[1]))
else:
dones.insert(0, "[%d] %s" % (key, value[1]))
return todos, inprogs, dones
def timestamp():
return '{:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now())
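# Data file sketch (inferred from read_data/write_data above; values are examples only):
#   data:
#     1: [todo, write docs, 2021-Jan-01 10:00:00, 2021-Jan-01 10:00:00]
#   deleted: {}
# Each entry is [state, text, last-updated, created] with state in todo/inprogress/done.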
| from rich import print
from rich.console import Console
from rich.table import Table
import click
from click_default_group import DefaultGroup
import yaml
import os
##from terminaltables import SingleTable
import sys
from textwrap import wrap
import collections
import datetime
import configparser
import pkg_resources # part of setuptools
VERSION = pkg_resources.require("clikan")[0].version
class Config(object):
"""The config in this example only holds aliases."""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items('aliases'))
except configparser.NoSectionError:
pass
pass_config = click.make_pass_decorator(Config, ensure=True)
class AliasedGroup(DefaultGroup):
"""This subclass of a group supports looking up aliases in a config
file and with a bit of magic.
"""
def get_command(self, ctx, cmd_name):
        # Step one: builtin commands as normal
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# Step two: find the config object and ensure it's there. This
        # will create the config object if missing.
cfg = ctx.ensure_object(Config)
        # Step three: look up an explicit command alias in the config
if cmd_name in cfg.aliases:
actual_cmd = cfg.aliases[cmd_name]
return click.Group.get_command(self, ctx, actual_cmd)
# Alternative option: if we did not find an explicit alias we
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
def read_config(ctx, param, value):
"""Callback that is used whenever --config is passed. We use this to
always load the correct config. This means that the config is loaded
even if the group itself never executes so our aliases stay always
available.
"""
cfg = ctx.ensure_object(Config)
if value is None:
value = os.path.join(os.path.dirname(__file__), 'aliases.ini')
cfg.read_config(value)
return value
@click.version_option(VERSION)
@click.command(cls=AliasedGroup, default='show', default_if_no_args=True)
def clikan():
"""clikan: CLI personal kanban """
@clikan.command()
def configure():
"""Place default config file in CLIKAN_HOME or HOME"""
home = get_clikan_home()
data_path = os.path.join(home, ".clikan.dat")
config_path = os.path.join(home, ".clikan.yaml")
if (os.path.exists(config_path) and not
click.confirm('Config file exists. Do you want to overwrite?')):
return
with open(config_path, 'w') as outfile:
conf = {'clikan_data': data_path}
yaml.dump(conf, outfile, default_flow_style=False)
click.echo("Creating %s" % config_path)
@clikan.command()
@click.argument('task')
def add(task):
"""Add a task in todo"""
if len(task) > 40:
click.echo('Task must be shorter than 40 chars. Brevity counts.')
else:
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
if ('limits' in config and 'todo' in config['limits'] and
int(config['limits']['todo']) <= len(todos)):
click.echo('No new todos, limit reached already.')
else:
od = collections.OrderedDict(sorted(dd['data'].items()))
new_id = 1
if bool(od):
new_id = next(reversed(od)) + 1
entry = ['todo', task, timestamp(), timestamp()]
dd['data'].update({new_id: entry})
click.echo("Creating new task w/ id: %d -> %s" % (new_id, task))
write_data(config, dd)
@clikan.command()
@click.argument('id')
def delete(id):
"""Delete task"""
config = read_config_yaml()
dd = read_data(config)
item = dd['data'].get(int(id))
if item is None:
click.echo('No existing task with that id.')
else:
item[0] = 'deleted'
item[2] = timestamp()
dd['deleted'].update({int(id): item})
dd['data'].pop(int(id))
write_data(config, dd)
click.echo('Removed task %d.' % int(id))
@clikan.command()
@click.argument('id')
def promote(id):
"""Promote task"""
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
item = dd['data'].get(int(id))
if item[0] == 'todo':
if ('limits' in config and 'wip' in config['limits'] and
int(config['limits']['wip']) <= len(inprogs)):
click.echo('No new tasks, limit reached already.')
else:
click.echo('Promoting task %s to in-progress.' % id)
dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
write_data(config, dd)
elif item[0] == 'inprogress':
click.echo('Promoting task %s to done.' % id)
dd['data'][int(id)] = ['done', item[1], timestamp(), item[3]]
write_data(config, dd)
else:
click.echo('Already done, can not promote %s' % id)
@clikan.command()
@click.argument('id')
def regress(id):
"""Regress task"""
config = read_config_yaml()
dd = read_data(config)
item = dd['data'].get(int(id))
if item[0] == 'done':
click.echo('Regressing task %s to in-progress.' % id)
dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
write_data(config, dd)
elif item[0] == 'inprogress':
click.echo('Regressing task %s to todo.' % id)
dd['data'][int(id)] = ['todo', item[1], timestamp(), item[3]]
write_data(config, dd)
else:
click.echo('Already in todo, can not regress %s' % id)
@clikan.command()
def show():
    """Show tasks in clikan"""
    console = Console()
config = read_config_yaml()
dd = read_data(config)
todos, inprogs, dones = split_items(config, dd)
if 'limits' in config and 'done' in config['limits']:
dones = dones[0:int(config['limits']['done'])]
else:
dones = dones[0:10]
todos = '\n'.join([str(x) for x in todos])
inprogs = '\n'.join([str(x) for x in inprogs])
dones = '\n'.join([str(x) for x in dones])
# td = [
# ['todo', 'in-progress', '[bold magenta]done[/bold magenta]'],
# ['', '', ''],
# ]
#table = SingleTable(td, 'clikan v.{}'.format(VERSION))
# table.inner_heading_row_border = False
# table.inner_row_border = True
# table.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
table = Table(show_header=True, show_footer=True)
table.add_column("[bold yellow]todo[/bold yellow]", no_wrap=True, footer="clikan")
table.add_column('[bold green]in-progress[/bold green]', no_wrap=True)
table.add_column('[bold magenta]done[/bold magenta]', no_wrap=True, footer="v.{}".format(VERSION))
# def wrap_lines(lines, column_index):
# max_width = table.column_max_width(column_index)
# packed = [line for line in lines if line.strip() != '']
# wrapped = [wrap(line, max_width, break_long_words=False,
# replace_whitespace=False) for line in packed]
# return '\n'.join(['\n'.join(w) for w in wrapped])
# for index, section in enumerate((todos, inprogs, dones)):
# table.table_data[1][index] = wrap_lines(section.splitlines(), index)
table.add_row(todos, inprogs, dones)
console.print(table)
#print(table.table)
def read_data(config):
"""Read the existing data from the config datasource"""
try:
with open(config["clikan_data"], 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print("Ensure %s exists, as you specified it "
"as the clikan data file." % config['clikan_data'])
print(exc)
except IOError:
click.echo("No data, initializing data file.")
write_data(config, {"data": {}, "deleted": {}})
with open(config["clikan_data"], 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
def write_data(config, data):
"""Write the data to the config datasource"""
with open(config["clikan_data"], 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def get_clikan_home():
home = os.environ.get('CLIKAN_HOME')
if not home:
home = os.path.expanduser('~')
return home
def read_config_yaml():
"""Read the app config from ~/.clikan.yaml"""
try:
home = get_clikan_home()
with open(home + "/.clikan.yaml", 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError:
print("Ensure %s/.clikan.yaml is valid, expected YAML." % home)
sys.exit()
except IOError:
print("Ensure %s/.clikan.yaml exists and is valid." % home)
sys.exit()
def split_items(config, dd):
todos = []
inprogs = []
dones = []
for key, value in dd['data'].items():
if value[0] == 'todo':
todos.append("[%d] %s" % (key, value[1]))
elif value[0] == 'inprogress':
inprogs.append("[%d] %s" % (key, value[1]))
else:
dones.insert(0, "[%d] %s" % (key, value[1]))
return todos, inprogs, dones
def timestamp():
return '{:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now()) | en | 0.594157 | ##from terminaltables import SingleTable # part of setuptools The config in this example only holds aliases. This subclass of a group supports looking up aliases in a config file and with a bit of magic. # Step one: bulitin commands as normal # Step two: find the config object and ensure it's there. This # will create the config object is missing. # Step three: lookup an explicit command aliase in the config # Alternative option: if we did not find an explicit alias we # allow automatic abbreviation of the command. "status" for # instance will match "st". We only allow that however if # there is only one command. Callback that is used whenever --config is passed. We use this to always load the correct config. This means that the config is loaded even if the group itself never executes so our aliases stay always available. clikan: CLI personal kanban Place default config file in CLIKAN_HOME or HOME Add a task in todo Delete task Promote task Regress task Show tasks in clikan # td = [ # ['todo', 'in-progress', '[bold magenta]done[/bold magenta]'], # ['', '', ''], # ] #table = SingleTable(td, 'clikan v.{}'.format(VERSION)) # table.inner_heading_row_border = False # table.inner_row_border = True # table.justify_columns = {0: 'center', 1: 'center', 2: 'center'} # def wrap_lines(lines, column_index): # max_width = table.column_max_width(column_index) # packed = [line for line in lines if line.strip() != ''] # wrapped = [wrap(line, max_width, break_long_words=False, # replace_whitespace=False) for line in packed] # return '\n'.join(['\n'.join(w) for w in wrapped]) # for index, section in enumerate((todos, inprogs, dones)): # table.table_data[1][index] = wrap_lines(section.splitlines(), index) #print(table.table) Read the existing data from the config datasource Write the data to the config datasource Read the app config from ~/.clikan.yaml | 2.407381 | 2 |
social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 0 | 9422 | <gh_stars>0
from social_core.backends.oauth import BaseOAuth2
class RagtagOAuth2(BaseOAuth2):
"""Ragtag ID OAuth authentication backend"""
name = "ragtag"
AUTHORIZATION_URL = "https://id.ragtag.org/oauth/authorize/"
ACCESS_TOKEN_URL = "https://id.ragtag.org/oauth/token/"
ACCESS_TOKEN_METHOD = "POST"
REVOKE_TOKEN_URL = "https://id.ragtag.org/oauth/revoke_token/"
SCOPE_SEPARATOR = " "
ID_KEY = "id"
def get_user_details(self, response):
"""Return user details from Ragtag ID account"""
return {
"username": response.get("username"),
"email": response.get("email"),
"first_name": response.get("first_name"),
"last_name": response.get("last_name"),
}
def user_data(self, access_token, *args, **kwargs):
"""Fetches user data from id.ragtag.org"""
return self.get_json(
"https://id.ragtag.org/api/me/",
headers={"Authorization": "Bearer {}".format(access_token)},
)
def auth_params(self, state=None):
params = super(RagtagOAuth2, self).auth_params(state=state)
approval_prompt = self.setting("APPROVAL_PROMPT", "auto")
if not approval_prompt == "auto":
params["approval_prompt"] = self.setting("APPROVAL_PROMPT", "")
return params
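# Typical wiring (illustrative; the setting names follow the usual python-social-auth
# SOCIAL_AUTH_<BACKEND> convention and are not defined in this file):
#   AUTHENTICATION_BACKENDS = ['social_auth_ragtag_id.backends.RagtagOAuth2', ...]
#   SOCIAL_AUTH_RAGTAG_KEY = '<client id>'
#   SOCIAL_AUTH_RAGTAG_SECRET = '<client secret>'
#   SOCIAL_AUTH_RAGTAG_APPROVAL_PROMPT = 'auto'  # optional, consumed by auth_params above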
| from social_core.backends.oauth import BaseOAuth2
class RagtagOAuth2(BaseOAuth2):
"""Ragtag ID OAuth authentication backend"""
name = "ragtag"
AUTHORIZATION_URL = "https://id.ragtag.org/oauth/authorize/"
ACCESS_TOKEN_URL = "https://id.ragtag.org/oauth/token/"
ACCESS_TOKEN_METHOD = "POST"
REVOKE_TOKEN_URL = "https://id.ragtag.org/oauth/revoke_token/"
SCOPE_SEPARATOR = " "
ID_KEY = "id"
def get_user_details(self, response):
"""Return user details from Ragtag ID account"""
return {
"username": response.get("username"),
"email": response.get("email"),
"first_name": response.get("first_name"),
"last_name": response.get("last_name"),
}
def user_data(self, access_token, *args, **kwargs):
"""Fetches user data from id.ragtag.org"""
return self.get_json(
"https://id.ragtag.org/api/me/",
headers={"Authorization": "Bearer {}".format(access_token)},
)
def auth_params(self, state=None):
params = super(RagtagOAuth2, self).auth_params(state=state)
approval_prompt = self.setting("APPROVAL_PROMPT", "auto")
if not approval_prompt == "auto":
params["approval_prompt"] = self.setting("APPROVAL_PROMPT", "")
return params | en | 0.666773 | Ragtag ID OAuth authentication backend Return user details from Ragtag ID account Fetches user data from id.ragtag.org | 2.586159 | 3 |
panel/api/models/provider.py | angeelgarr/DCPanel | 7 | 9423 | from django.db import models
from django.contrib import admin
class Provider(models.Model):
name = models.CharField(max_length=50)
domain = models.CharField(max_length=50)
class Meta:
ordering = ['name']
app_label = 'api'
def __str__(self):
return self.domain
@admin.register(Provider)
class ProviderAdmin(admin.ModelAdmin):
list_display = ('name', 'domain')
| from django.db import models
from django.contrib import admin
class Provider(models.Model):
name = models.CharField(max_length=50)
domain = models.CharField(max_length=50)
class Meta:
ordering = ['name']
app_label = 'api'
def __str__(self):
return self.domain
@admin.register(Provider)
class ProviderAdmin(admin.ModelAdmin):
list_display = ('name', 'domain')
| none | 1 | 2.265037 | 2 |
|
trial/src/sender.py | siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | 9 | 9424 | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from gazebo_msgs.msg import LinkState
def talker():
pub = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=10)
ppp = LinkState()
rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz
i = 1
while not rospy.is_shutdown():
ppp.link_name = "platform"
ppp.pose.position.x = 0.1
ppp.pose.position.y = 0.1
ppp.pose.position.z = 1
ppp.pose.orientation.x = 0
ppp.pose.orientation.y = 0
ppp.pose.orientation.z = 0
        ppp.pose.orientation.w = 1  # identity orientation; an all-zero quaternion is not valid
i = i+1
rospy.loginfo(ppp)
pub.publish(ppp)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from gazebo_msgs.msg import LinkState
def talker():
pub = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=10)
ppp = LinkState()
rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz
i = 1
while not rospy.is_shutdown():
ppp.link_name = "platform"
ppp.pose.position.x = 0.1
ppp.pose.position.y = 0.1
ppp.pose.position.z = 1
ppp.pose.orientation.x = 0
ppp.pose.orientation.y = 0
ppp.pose.orientation.z = 0
        ppp.pose.orientation.w = 1  # identity orientation; an all-zero quaternion is not valid
i = i+1
rospy.loginfo(ppp)
pub.publish(ppp)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| en | 0.434489 | #!/usr/bin/env python # license removed for brevity # 10hz | 2.294635 | 2 |
discriminator_dataset.py | kimmokal/CC-Art-Critics | 0 | 9425 | import torch
from os import listdir, path
from PIL import Image
import torchvision
class DiscriminatorDataset(torch.utils.data.Dataset):
def __init__(self):
super(DiscriminatorDataset, self).__init__()
currentDir = path.dirname(__file__)
abstractDir = path.join(currentDir, 'image_data/abstract')
realisticDir = path.join(currentDir, 'image_data/realistic')
abstractFiles = [path.join(abstractDir, f) for f in listdir(
abstractDir) if path.isfile(path.join(abstractDir, f))]
realisticFiles = [path.join(realisticDir, f) for f in listdir(
realisticDir) if path.isfile(path.join(realisticDir, f))]
self.abstractFilesLen = len(abstractFiles)
self.allFiles = abstractFiles + realisticFiles
def __len__(self):
return len(self.allFiles)
def __getitem__(self, index):
filename = self.allFiles[index]
pilImage = Image.open(filename).convert("RGB")
return (torchvision.transforms.ToTensor()(pilImage), 1 if index < self.abstractFilesLen else 0)
| import torch
from os import listdir, path
from PIL import Image
import torchvision
class DiscriminatorDataset(torch.utils.data.Dataset):
def __init__(self):
super(DiscriminatorDataset, self).__init__()
currentDir = path.dirname(__file__)
abstractDir = path.join(currentDir, 'image_data/abstract')
realisticDir = path.join(currentDir, 'image_data/realistic')
abstractFiles = [path.join(abstractDir, f) for f in listdir(
abstractDir) if path.isfile(path.join(abstractDir, f))]
realisticFiles = [path.join(realisticDir, f) for f in listdir(
realisticDir) if path.isfile(path.join(realisticDir, f))]
self.abstractFilesLen = len(abstractFiles)
self.allFiles = abstractFiles + realisticFiles
def __len__(self):
return len(self.allFiles)
def __getitem__(self, index):
filename = self.allFiles[index]
pilImage = Image.open(filename).convert("RGB")
return (torchvision.transforms.ToTensor()(pilImage), 1 if index < self.abstractFilesLen else 0)
| none | 1 | 2.620354 | 3 |
|
emailmeld/sender.py | ionata/django-emailmeld | 0 | 9426 | <filename>emailmeld/sender.py<gh_stars>0
from django.core.mail.message import EmailMessage, EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
def send_mail_task(subject, message, from_email, recipient_list):
message = EmailMessage("Discover Special Value - {0}".format(subject), message, from_email, recipient_list)
message.send()
def send_html_mail_task(subject, text_message, html_message, from_email, recipient_list, template='email/email_base.html'):
if template is not None:
html_message = render_to_string(template, {'content': mark_safe(html_message)}) # render html into an email template
message = EmailMultiAlternatives("Discover Special Value - {0}".format(subject), html_message, from_email, recipient_list)
message.content_subtype = "html"
message.attach_alternative(text_message, "text/plain")
message.send()
| <filename>emailmeld/sender.py<gh_stars>0
from django.core.mail.message import EmailMessage, EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
def send_mail_task(subject, message, from_email, recipient_list):
message = EmailMessage("Discover Special Value - {0}".format(subject), message, from_email, recipient_list)
message.send()
def send_html_mail_task(subject, text_message, html_message, from_email, recipient_list, template='email/email_base.html'):
if template is not None:
html_message = render_to_string(template, {'content': mark_safe(html_message)}) # render html into an email template
message = EmailMultiAlternatives("Discover Special Value - {0}".format(subject), html_message, from_email, recipient_list)
message.content_subtype = "html"
message.attach_alternative(text_message, "text/plain")
message.send()
| en | 0.576305 | # render html into an email template | 2.229854 | 2 |
tests/test_hap_server.py | sander-vd/HAP-python | 3 | 9427 | """Tests for the HAPServer."""
from socket import timeout
from unittest.mock import Mock, MagicMock, patch
import pytest
from pyhap import hap_server
@patch('pyhap.hap_server.HAPServer.server_bind', new=MagicMock())
@patch('pyhap.hap_server.HAPServer.server_activate', new=MagicMock())
def test_finish_request_pops_socket():
"""Test that ``finish_request`` always clears the connection after a request."""
amock = Mock()
client_addr = ('192.168.1.1', 55555)
server_addr = ('', 51826)
# Positive case: The request is handled
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: MagicMock())
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request fails with a timeout
def raises(*args):
raise timeout()
server = hap_server.HAPServer(server_addr, amock,
handler_type=raises)
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request raises some other exception
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: 1 / 0)
server.connections[client_addr] = amock
with pytest.raises(Exception):
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
| """Tests for the HAPServer."""
from socket import timeout
from unittest.mock import Mock, MagicMock, patch
import pytest
from pyhap import hap_server
@patch('pyhap.hap_server.HAPServer.server_bind', new=MagicMock())
@patch('pyhap.hap_server.HAPServer.server_activate', new=MagicMock())
def test_finish_request_pops_socket():
"""Test that ``finish_request`` always clears the connection after a request."""
amock = Mock()
client_addr = ('192.168.1.1', 55555)
server_addr = ('', 51826)
# Positive case: The request is handled
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: MagicMock())
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request fails with a timeout
def raises(*args):
raise timeout()
server = hap_server.HAPServer(server_addr, amock,
handler_type=raises)
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request raises some other exception
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: 1 / 0)
server.connections[client_addr] = amock
with pytest.raises(Exception):
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
| en | 0.83912 | Tests for the HAPServer. Test that ``finish_request`` always clears the connection after a request. # Positive case: The request is handled # Negative case: The request fails with a timeout # Negative case: The request raises some other exception | 2.710465 | 3 |
app/views/main.py | charlesashby/marketvault-front-end | 0 | 9428 | <filename>app/views/main.py
from flask import render_template, Blueprint, request
from app.utils.search import MySQLClient
from app.utils.preprocessor import TextPreprocessor
mainbp = Blueprint("main", __name__)
@mainbp.route("/search", methods=["GET"])
@mainbp.route("/", methods=["GET"])
def home():
stores_by_page = 10
topic = request.args.get("topic")
category = request.args.get("category")
daily_visitors = request.args.get("dailyvisitors")
alexa_rank = request.args.get("alexarank")
    page = int(request.args.get("page") or 0)
if all([topic is None, category is None, daily_visitors is None, alexa_rank is None]):
stores = MySQLClient.random_stores(page * stores_by_page, stores_by_page)
else:
stores = MySQLClient.search_stores(category, daily_visitors, alexa_rank, topic, page * stores_by_page, stores_by_page)
stores = [
{
"url": store.url,
"description": TextPreprocessor.clean_str(store.description),
"title": TextPreprocessor.clean_str(store.title),
"alexa_rank": store.alexa_rank,
"category": store.category,
"average_product_price": store.average_product_price,
"daily_visitors": store.daily_visitors
} for store in stores
]
return render_template("search/index.html", stores=stores)
@mainbp.route("/search/topics", methods=["GET"])
def search_topics():
substring = request.args.get("q")
return [
{
"id": topic.id,
"text": topic.text
} for topic in MySQLClient.search_topic_by_substring(substring)
]
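# Example requests (illustrative): GET /search?category=Fashion&page=2 pages through
# filtered stores, and GET /search/topics?q=sho feeds the topic autocomplete.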
| <filename>app/views/main.py
from flask import render_template, Blueprint, request
from app.utils.search import MySQLClient
from app.utils.preprocessor import TextPreprocessor
mainbp = Blueprint("main", __name__)
@mainbp.route("/search", methods=["GET"])
@mainbp.route("/", methods=["GET"])
def home():
stores_by_page = 10
topic = request.args.get("topic")
category = request.args.get("category")
daily_visitors = request.args.get("dailyvisitors")
alexa_rank = request.args.get("alexarank")
    page = int(request.args.get("page") or 0)
if all([topic is None, category is None, daily_visitors is None, alexa_rank is None]):
stores = MySQLClient.random_stores(page * stores_by_page, stores_by_page)
else:
stores = MySQLClient.search_stores(category, daily_visitors, alexa_rank, topic, page * stores_by_page, stores_by_page)
stores = [
{
"url": store.url,
"description": TextPreprocessor.clean_str(store.description),
"title": TextPreprocessor.clean_str(store.title),
"alexa_rank": store.alexa_rank,
"category": store.category,
"average_product_price": store.average_product_price,
"daily_visitors": store.daily_visitors
} for store in stores
]
return render_template("search/index.html", stores=stores)
@mainbp.route("/search/topics", methods=["GET"])
def search_topics():
substring = request.args.get("q")
return [
{
"id": topic.id,
"text": topic.text
} for topic in MySQLClient.search_topic_by_substring(substring)
]
| none | 1 | 2.650454 | 3 |
|
bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py | tinapiao/Software-IC-Automation | 0 | 9429 | <gh_stars>0
# -*- coding: utf-8 -*-
"""This module contains design algorithm for a traditional two stage operational amplifier."""
from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Sequence
from copy import deepcopy
import numpy as np
import scipy.optimize as sciopt
from bag.math import gcd
from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db
from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden
from bag.simulation.core import MeasurementManager
from verification.mos.query import MOSDBDiscrete
from .components import LoadDiodePFB, InputGm
if TYPE_CHECKING:
from verification.ac.core import ACTB
class TailStage1(object):
"""Tail transistor of the first stage op amp.
Due to layout restrictions, the tail transistor needs to have the same number of fingers
    and stack number as the input transistor. This class finds the optimal width/intent.
"""
def __init__(self, mos_db):
# type: (MOSDBDiscrete) -> None
self._db = mos_db
self._intent_list = mos_db.get_dsn_param_values('intent')
self._valid_widths = mos_db.width_list
self._best_op = None
def design(self,
itarg_list, # type: List[float]
vd_list, # type: List[float]
vout_amp_list, # type: List[float]
vb, # type: float
l, # type: float
seg, # type: int
stack, # type: int
):
# type: (...) -> None
vgs_idx = self._db.get_fun_arg_index('vgs')
self._best_op = best_score = None
for intent in self._intent_list:
for w in self._valid_widths:
self._db.set_dsn_params(l=l, w=w, intent=intent, stack=stack)
ib = self._db.get_function_list('ibias')
gds = self._db.get_function_list('gds')
vgs_min, vgs_max = ib[0].get_input_range(vgs_idx)
vg_min = vgs_min + vb
vg_max = vgs_max + vb
# find vgs for each corner
vgs_list, gds1_list, gds2_list = self._solve_vgs(itarg_list, vout_amp_list, vd_list,
ib, gds, seg, vb, vg_min, vg_max)
if vgs_list is not None:
cur_score = max(gds2_list)
if self._best_op is None or cur_score < best_score:
best_score = cur_score
self._best_op = (w, intent, seg, stack, vb, vgs_list, vout_amp_list,
gds1_list, gds2_list)
def _solve_vgs(self, itarg_list, vout_list, vd_list, ib_list, gds_list, seg, vb, vg_min,
vg_max):
vgs_list, gds1_list, gds2_list = [], [], []
for itarg, vout, vd, ibf, gdsf in zip(itarg_list, vout_list, vd_list, ib_list, gds_list):
def zero_fun(vg):
farg = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vg - vb)
return seg * ibf(farg) - itarg
v1, v2 = zero_fun(vg_min), zero_fun(vg_max)
if v1 < 0 and v2 < 0 or v1 > 0 and v2 > 0:
# no solution
return None, None, None
vg_sol = sciopt.brentq(zero_fun, vg_min, vg_max) # type: float
vgs_opt = vg_sol - vb
arg1 = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vgs_opt)
arg2 = self._db.get_fun_arg(vbs=vb - vd, vds=vout - vb, vgs=vgs_opt)
vgs_list.append(vgs_opt)
gds1_list.append(seg * gdsf(arg1))
gds2_list.append(seg * gdsf(arg2))
return vgs_list, gds1_list, gds2_list
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
if self._best_op is None:
return None
w, intent, seg, stack, vb, vgs_list, vout_list, gds1_list, gds2_list = self._best_op
self._db.set_dsn_params(w=w, intent=intent, stack=stack)
cdd = self._db.get_function_list('cdd')
cdd2_list = []
for vgs, vout, cddf in zip(vgs_list, vout_list, cdd):
arg = self._db.get_fun_arg(vbs=0, vds=vout - vb, vgs=vgs)
cur_cdd = cddf(arg) # type: float
cdd2_list.append(seg * cur_cdd)
return dict(
w=w,
intent=intent,
vgs=vgs_list,
gds1=gds1_list,
gds2=gds2_list,
cdd2=cdd2_list,
)
class StageOneCurrentError(Exception):
pass
class OpAmpTwoStage(object):
"""A two stage fully differential operational amplifier.
The first stage is a differential amplifier with diode + positive feedback load, the
    second stage is a pseudo-differential common source amplifier.
This topology has the following advantages:
1. large output swing.
2. Common mode feedback is only required for the second stage.
"""
def __init__(self, nch_db, pch_db):
# type: (MOSDBDiscrete, MOSDBDiscrete) -> None
self._nch_db = nch_db
self._pch_db = pch_db
self._amp_info = None
def design(self,
i1_unit, # type: List[float]
i1_min_size, # type: int
vg_list, # type: List[float]
vout_list, # type: List[float]
cpar1, # type: float
cload, # type: float
f_unit, # type: float
phase_margin, # type: float
res_var, # type: float
l, # type: float
vstar_gm_min, # type: float
ft_load_scale, # type: float
vds_tail_min, # type: float
seg_gm_min, # type: int
vdd, # type: float
pmos_input=True, # type: bool
max_ref_ratio=20, # type: int
load_stack_list=None, # type: Optional[List[int]]
):
# type: (...) -> None
# binary search for minimum stage 1 current,
i1_size_iter = BinaryIterator(i1_min_size, None)
i1_size_opt, opt_info = None, None
while i1_size_iter.has_next():
i1_size = i1_size_iter.get_next()
print('trying i1_size = %d' % i1_size)
try:
self._design_with_itarg(i1_size, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
success = True
except StageOneCurrentError as err:
print(err)
success = False
if success:
print('success')
opt_info = self._amp_info
i1_size_opt = i1_size
i1_size_iter.down()
else:
i1_size_iter.up()
# linear search to find optimal scale2
scale2_int_max = int(opt_info['scale2'])
if scale2_int_max == opt_info['scale2']:
scale2_int_max -= 1
last_i1_size = i1_size_opt
print('i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
for scale2_test in range(scale2_int_max, 0, -1):
i1_size_test = int(np.floor(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test)))
if i1_size_test <= last_i1_size or scale2_test == opt_info['scale2']:
continue
print('testing i1_size = %d, scale2 = %.4g' % (i1_size_test, scale2_test))
try:
self._design_with_itarg(i1_size_test, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
except StageOneCurrentError as err:
print(err)
continue
if self._amp_info['scale2'] <= scale2_test:
# found new minimum. close in to find optimal i1 size
opt_info = self._amp_info
i1_size_opt = i1_size_test
print('update: i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter = BinaryIterator(last_i1_size + 1, i1_size_test)
while i1_size_iter.has_next():
i1_size_cur_opt = i1_size_iter.get_next()
print('testing i1_size = %d' % i1_size_cur_opt)
try:
self._design_with_itarg(i1_size_cur_opt, i1_unit, vg_list, vout_list, cpar1,
cload, f_unit, phase_margin, res_var, l,
vstar_gm_min, ft_load_scale, vds_tail_min,
seg_gm_min, vdd, pmos_input, max_ref_ratio,
load_stack_list)
if self._amp_info['scale2'] <= opt_info['scale2']:
opt_info = self._amp_info
i1_size_opt = i1_size_cur_opt
print('update: i1_size = %d, '
'scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter.down()
else:
i1_size_iter.up()
except StageOneCurrentError as err:
print(err)
i1_size_iter.up()
last_i1_size = i1_size_test
self._amp_info = opt_info
def _design_with_itarg(self,
i1_size, # type: int
i1_unit, # type: List[float]
vg_list, # type: List[float]
vout_list, # type: List[float]
cpar1, # type: float
cload, # type: float
f_unit, # type: float
phase_margin, # type: float
res_var, # type: float
l, # type: float
vstar_gm_min, # type: float
ft_load_scale, # type: float
vds_tail_min, # type: float
seg_gm_min, # type: int
vdd, # type: float
pmos_input, # type: bool
max_ref_ratio, # type: int
load_stack_list, # type: Optional[List[int]]
):
# type: (...) -> None
itarg_list = [i1 * i1_size for i1 in i1_unit]
if pmos_input:
load_db = self._nch_db
gm_db = self._pch_db
vds2_list = vout_list
vb_gm = vdd
vb_load = 0
else:
load_db = self._pch_db
gm_db = self._nch_db
vds2_list = [vo - vdd for vo in vout_list]
vb_gm = 0
vb_load = vdd
load = LoadDiodePFB(load_db)
gm = InputGm(gm_db)
tail1 = TailStage1(gm_db)
# design load
print('designing load')
load.design(itarg_list, vds2_list, ft_load_scale * f_unit, stack_list=load_stack_list)
load_info = load.get_dsn_info()
vgs_load_list = load_info['vgs']
gds_load_list = load_info['gds1']
gm2_list = load_info['gm2']
stack_diode = load_info['stack_diode']
stack_ngm = load_info['stack_ngm']
seg_diode = load_info['seg_diode']
seg_ngm = load_info['seg_ngm']
if pmos_input:
vmid_list = vgs_load_list
else:
vmid_list = [vdd - vgs for vgs in vgs_load_list]
# design input gm
print('designing input gm')
gm.design(itarg_list, vg_list, vmid_list, gds_load_list, vb_gm, vstar_gm_min, vds_tail_min,
seg_min=seg_gm_min, stack_list=[stack_ngm])
gm_info = gm.get_dsn_info()
gm1_list = gm_info['gm']
gds_in_list = gm_info['gds']
vtail_list = gm_info['vs']
seg_gm = gm_info['seg']
stack_gm = gm_info['stack']
gds1_list = [gds_in + gds_load for gds_in, gds_load in zip(gds_in_list, gds_load_list)]
gain1_list = [gm1 / gds1 for gm1, gds1 in zip(gm1_list, gds1_list)]
# design stage 1 tail
print('designing tail')
tail1.design(itarg_list, vtail_list, vout_list, vb_gm, l, seg_gm, stack_gm)
tail1_info = tail1.get_dsn_info()
vbias_list = [vgs_tail + vb_gm for vgs_tail in tail1_info['vgs']]
# design stage 2 gm
w_dict = {'load': load_info['w'], 'in': gm_info['w'], 'tail': tail1_info['w']}
th_dict = {'load': load_info['intent'], 'in': gm_info['intent'],
'tail': tail1_info['intent']}
stack_dict = {'tail': stack_gm, 'in': stack_gm, 'diode': stack_diode, 'ngm': stack_ngm}
seg_dict = {'tail1': seg_gm,
'in': seg_gm,
'diode1': seg_diode,
'ngm1': seg_ngm,
}
print('designing stage 2')
stage2_results = self._design_stage2(gm_db, load_db, vtail_list, vg_list, vmid_list,
vout_list, vbias_list, vb_gm, vb_load, cload, cpar1,
w_dict, th_dict, stack_dict, seg_dict, gm2_list,
res_var, phase_margin, f_unit, max_ref_ratio)
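        # scale2 is the stage-2 : stage-1 device-size ratio and scaler the bias-reference
        # ratio; itot counts both stage-1 branches, both scaled stage-2 branches and the
        # reference branch for each corner.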
scale2 = seg_dict['diode2'] / seg_dict['diode1']
scaler = seg_dict['ref'] / seg_dict['tail1']
itot_list = [(2 * (1 + scale2) + scaler) * itarg for itarg in itarg_list]
layout_info = dict(
w_dict=w_dict,
th_dict=th_dict,
stack_dict=stack_dict,
seg_dict=seg_dict,
)
self._amp_info = dict(
i1_size=i1_size,
scale2=scale2,
scaler=scaler,
vtail=vtail_list,
vmid=vmid_list,
vbias=vbias_list,
itot=itot_list,
vstar=gm_info['vstar'],
cin=gm_info['cgg'],
gm1=gm1_list,
gds1=gds1_list,
gain1=gain1_list,
rfb=stage2_results['rz'],
cfb=stage2_results['cf'],
gain_tot=stage2_results['gain'],
f_3db=stage2_results['f_3db'],
f_unit=stage2_results['f_unity'],
phase_margin=stage2_results['phase_margin'],
layout_info=layout_info,
)
print('done')
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
return self._amp_info
def get_specs_verification(self, top_specs):
# type: (Dict[str, Any]) -> Dict[str, Any]
top_specs = deepcopy(top_specs)
dsn_specs = top_specs['dsn_specs']
ibias = dsn_specs['i1_unit'][0] * self._amp_info['i1_size'] * self._amp_info['scaler']
vdd = dsn_specs['vdd']
vindc = dsn_specs['vg_list'][0]
voutdc = dsn_specs['vout_list'][0]
f_unit = dsn_specs['f_unit']
gain_max = max(self._amp_info['gain_tot'])
f_bw_log = int(np.floor(np.log10(f_unit / gain_max)))
f_unit_log = int(np.ceil(np.log10(f_unit)))
top_specs['layout_params'].update(self._amp_info['layout_info'])
meas = top_specs['measurements'][0]
meas['cfb'] = self._amp_info['cfb']
meas['rfb'] = self._amp_info['rfb']
ac_tb = meas['testbenches']['ac']
ac_tb['fstart'] = 10 ** (f_bw_log - 1)
ac_tb['fstop'] = 10 ** (f_unit_log + 1)
ac_sim_vars = ac_tb['sim_vars']
ac_sim_vars['vdd'] = vdd
ac_sim_vars['cload'] = dsn_specs['cload']
ac_sim_vars['vincm'] = vindc
ac_sim_vars['voutcm'] = voutdc
ac_sim_vars['ibias'] = ibias
ac_sim_vars['vdd'] = vdd
ac_sim_vars['vinac'] = 1.0
ac_sim_vars['vindc'] = 0.0
"""
top_specs['tb_dc']['tb_params']['vimax'] = vdd
top_specs['tb_dc']['tb_params']['vimin'] = -vdd
top_specs['tb_dc']['tb_params']['vindc'] = vindc
top_specs['tb_dc']['tb_params']['voutcm'] = voutdc
top_specs['tb_dc']['tb_params']['ibias'] = ibias
top_specs['tb_dc']['tb_params']['vdd'] = vdd
top_specs['tb_dc']['tb_params']['voutref'] = voutdc
top_specs['tb_dc']['tb_params']['vout_start'] = -vdd + 0.15
top_specs['tb_dc']['tb_params']['vout_stop'] = vdd - 0.15
"""
return top_specs
def _design_stage2(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gm2_list, res_var, phase_margin, f_unit, max_ref_ratio):
seg_tail1 = seg_dict['tail1']
seg_diode1 = seg_dict['diode1']
seg_ngm1 = seg_dict['ngm1']
# step 1: find stage 2 unit size
seg_gcd = gcd(gcd(seg_tail1, seg_diode1), seg_ngm1)
if seg_gcd % 2 != 0:
raise ValueError('All segment numbers must be even.')
# divide seg_gcd by 2 to make sure all generated segment numbers are even
seg_gcd //= 2
# make sure we have enough tail fingers for common mode feedback
min_size = 2 if seg_tail1 // seg_gcd == 2 else 1
def ac_results_fun(cur_size):
seg_dict['tail2'] = seg_tail1 // seg_gcd * cur_size
seg_dict['diode2'] = seg_diode1 // seg_gcd * cur_size
seg_dict['ngm2'] = seg_ngm1 // seg_gcd * cur_size
cur_scale2 = cur_size / seg_gcd
cur_gm2_list = [gm2 * cur_scale2 for gm2 in gm2_list]
ac_results = self._find_rz_cf(gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list,
vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict,
stack_dict, seg_dict, cur_gm2_list, res_var, phase_margin)
return ac_results
def funity_fun(cur_size):
ac_results_tmp = ac_results_fun(cur_size)
fu_list = ac_results_tmp[0]
if fu_list is None:
return -1
# noinspection PyTypeChecker
ans = min(fu_list)
return ans
# find min_size such that amplifier is stable
min_bin_iter = BinaryIterator(min_size, None)
while min_bin_iter.has_next():
test_size = min_bin_iter.get_next()
test_fu = funity_fun(test_size)
if test_fu >= 0:
min_bin_iter.save()
min_bin_iter.down()
else:
min_bin_iter.up()
min_result = minimize_cost_golden(funity_fun, f_unit, offset=min_bin_iter.get_last_save())
if min_result.x is None:
msg = 'Insufficient stage 1 current. funity_max=%.4g'
raise StageOneCurrentError(msg % min_result.vmax)
funity_list, rz_nom, cf_min, gain_list, f3db_list, pm_list = ac_results_fun(min_result.x)
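        # split the stage-2 tail segments between the main tail and the common-mode feedback device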
seg_tail2_tot = seg_dict['tail2']
seg_tail2 = (seg_tail2_tot // 4) * 2
seg_tailcm = seg_tail2_tot - seg_tail2
seg_tail_tot = 2 * (seg_dict['tail1'] + seg_tail2)
seg_dict['tail2'] = seg_tail2
seg_dict['tailcm'] = seg_tailcm
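        # 'ref' segments: ceil(seg_tail_tot / max_ref_ratio), rounded up to an even count, minimum 2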
seg_dict['ref'] = max(2, -((-seg_tail_tot // max_ref_ratio) // 2) * 2)
return dict(
rz=rz_nom,
cf=cf_min,
gain=gain_list,
f_3db=f3db_list,
f_unity=funity_list,
phase_margin=pm_list,
)
@classmethod
def _get_stage2_ss(cls, gm2_list, gds2_list, c2_list, cg2_list, cload, seg_gcd, cur_size):
cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list = [], [], [], []
for gm2, gds2, c2, cg2 in zip(gm2_list, gds2_list, c2_list, cg2_list):
cur_gm2_list.append(gm2 * cur_size / seg_gcd)
cur_gds2_list.append(gds2 * cur_size / seg_gcd)
cur_c2_list.append(cload + c2 * cur_size / seg_gcd)
cur_cg2_list.append(cg2 * cur_size / seg_gcd)
return cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list
def _find_rz_cf(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gm2_list, res_var, phase_margin, cap_tol=1e-15, cap_step=10e-15, cap_min=1e-15,
cap_max=1e-9):
"""Find minimum miller cap that stabilizes the system.
NOTE: This function assume phase of system for any miller cap value will not loop
around 360, otherwise it may get the phase margin wrong. This assumption should be valid
for this op amp.
"""
gz_worst = float(min(gm2_list))
gz_nom = gz_worst * (1 - res_var)
# find maximum Cf needed to stabilize all corners
cf_min = cap_min
for env_idx, (vtail, vg, vmid, vout, vbias) in \
enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gz_worst)
bin_iter = FloatBinaryIterator(cf_min, None, cap_tol, search_step=cap_step)
while bin_iter.has_next():
cur_cf = bin_iter.get_next()
cir.add_cap(cur_cf, 'outp', 'xp')
cir.add_cap(cur_cf, 'outn', 'xn')
num, den = cir.get_num_den('in', 'out')
cur_pm, _ = get_stability_margins(num, den)
if cur_pm < phase_margin:
if cur_cf > cap_max:
# no way to make amplifier stable, just return
return None, None, None, None, None, None
bin_iter.up()
else:
bin_iter.save()
bin_iter.down()
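                # remove the trial compensation caps before testing the next value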
cir.add_cap(-cur_cf, 'outp', 'xp')
cir.add_cap(-cur_cf, 'outn', 'xn')
# bin_iter is guaranteed to save at least one value, so don't need to worry about
# cf_min being None
cf_min = bin_iter.get_last_save()
# find gain, unity gain bandwidth, and phase margin across corners
gain_list, f3db_list, funity_list, pm_list = [], [], [], []
for env_idx, (vtail, vg, vmid, vout, vbias) in \
enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gz_nom)
cir.add_cap(cf_min, 'outp', 'xp')
cir.add_cap(cf_min, 'outn', 'xn')
num, den = cir.get_num_den('in', 'out')
pn = np.poly1d(num)
pd = np.poly1d(den)
gain_list.append(abs(pn(0) / pd(0)))
f3db_list.append(get_w_3db(num, den) / 2 / np.pi)
funity_list.append(get_w_crossings(num, den)[0] / 2 / np.pi)
pm_list.append(get_stability_margins(num, den)[0])
return funity_list, 1 / gz_nom, cf_min, gain_list, f3db_list, pm_list
@classmethod
def _make_circuit(cls, env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load,
cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz, neg_cap=False,
no_fb=False):
cur_env = gm_db.env_list[env_idx]
gm_db.set_dsn_params(w=w_dict['tail'], intent=th_dict['tail'], stack=stack_dict['tail'])
tail1_params = gm_db.query(env=cur_env, vbs=0, vds=vtail - vb_gm, vgs=vbias - vb_gm)
tail2_params = gm_db.query(env=cur_env, vbs=0, vds=vout - vb_gm, vgs=vbias - vb_gm)
gm_db.set_dsn_params(w=w_dict['in'], intent=th_dict['in'], stack=stack_dict['in'])
gm1_params = gm_db.query(env=cur_env, vbs=vb_gm - vtail, vds=vmid - vtail, vgs=vg - vtail)
load_db.set_dsn_params(w=w_dict['load'], intent=th_dict['load'], stack=stack_dict['diode'])
diode1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
diode2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
load_db.set_dsn_params(stack=stack_dict['ngm'])
ngm1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
ngm2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
cir = LTICircuit()
# stage 1
cir.add_transistor(tail1_params, 'tail', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail1'],
neg_cap=neg_cap)
cir.add_transistor(gm1_params, 'midp', 'inn', 'tail', 'gnd', fg=seg_dict['in'],
neg_cap=neg_cap)
cir.add_transistor(gm1_params, 'midn', 'inp', 'tail', 'gnd', fg=seg_dict['in'],
neg_cap=neg_cap)
cir.add_transistor(diode1_params, 'midp', 'midp', 'gnd', 'gnd', fg=seg_dict['diode1'],
neg_cap=neg_cap)
cir.add_transistor(diode1_params, 'midn', 'midn', 'gnd', 'gnd', fg=seg_dict['diode1'],
neg_cap=neg_cap)
cir.add_transistor(ngm1_params, 'midn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm1'],
neg_cap=neg_cap)
cir.add_transistor(ngm1_params, 'midp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm1'],
neg_cap=neg_cap)
# stage 2
cir.add_transistor(tail2_params, 'outp', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
neg_cap=neg_cap)
cir.add_transistor(tail2_params, 'outn', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
neg_cap=neg_cap)
cir.add_transistor(diode2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['diode2'],
neg_cap=neg_cap)
cir.add_transistor(diode2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['diode2'],
neg_cap=neg_cap)
cir.add_transistor(ngm2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm2'],
neg_cap=neg_cap)
cir.add_transistor(ngm2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm2'],
neg_cap=neg_cap)
# parasitic cap
cir.add_cap(cpar1, 'midp', 'gnd')
cir.add_cap(cpar1, 'midn', 'gnd')
# load cap
cir.add_cap(cload, 'outp', 'gnd')
cir.add_cap(cload, 'outn', 'gnd')
# feedback resistors
if not no_fb:
cir.add_conductance(gz, 'xp', 'midn')
cir.add_conductance(gz, 'xn', 'midp')
# diff-to-single conversion
cir.add_vcvs(0.5, 'inp', 'gnd', 'in', 'gnd')
cir.add_vcvs(-0.5, 'inn', 'gnd', 'in', 'gnd')
cir.add_vcvs(1, 'out', 'gnd', 'outp', 'outn')
return cir
class OpAmpTwoStageChar(MeasurementManager):
def __init__(self,
data_dir, # type: str
meas_name, # type: str
impl_lib, # type: str
specs, # type: Dict[str, Any]
wrapper_lookup, # type: Dict[str, str]
sim_view_list, # type: Sequence[Tuple[str, str]]
env_list, # type: Sequence[str]
):
MeasurementManager.__init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup,
sim_view_list, env_list)
def get_initial_state(self):
# type: () -> str
"""Returns the initial FSM state."""
return 'ac0'
def get_testbench_info(self, state, prev_output):
rfb0 = self.specs['rfb']
cfb0 = self.specs['cfb']
find_cfb = self.specs.get('find_cfb', True)
res_var = self.specs['res_var']
cmin_scale = self.specs['cmin_scale']
cmax_scale = self.specs['cmax_scale']
num_pts = self.specs['num_pts']
tmp = super(OpAmpTwoStageChar, self).get_testbench_info('ac', prev_output)
tb_name, tb_type, tb_specs, tb_params = tmp
if state == 'ac0' and find_cfb:
cfb_list = np.linspace(cfb0 * cmin_scale, cfb0 * cmax_scale, num_pts).tolist()
tb_specs['sim_vars']['rfb'] = rfb0 * (1 - res_var)
tb_specs['sim_vars']['cfb'] = cfb_list
else:
if find_cfb:
cfb = self.get_state_output('ac0')['cfb']
else:
cfb = cfb0
tb_specs['sim_vars']['rfb'] = rfb0
tb_specs['sim_vars']['cfb'] = cfb
return tb_name, tb_type, tb_specs, tb_params
def process_output(self, state, data, tb_manager):
# type: (str, Dict[str, Any], ACTB) -> Tuple[bool, str, Dict[str, Any]]
phase_margin = self.specs['phase_margin']
find_cfb = self.specs.get('find_cfb', True)
output_list = ['vout']
results = tb_manager.get_ugb_and_pm(data, output_list)
if state == 'ac0' and find_cfb:
done = False
next_state = 'ac1'
cfb = self._find_min_cfb(phase_margin, results)
output = dict(cfb=cfb)
else:
done = True
next_state = ''
if find_cfb:
cfb = self.get_state_output('ac0')['cfb']
else:
cfb = self.specs['cfb']
gain_results = tb_manager.get_gain_and_w3db(data, output_list, output_dict=results)
corner_list = results['corner'].tolist()
gain_list = gain_results['gain_vout'].tolist()
bw_list = gain_results['w3db_vout'].tolist()
funity_list = results['funity_vout'].tolist()
pm_list = results['pm_vout'].tolist()
output = dict(cfb=cfb, corners=corner_list, gain=gain_list, bw=bw_list,
funity=funity_list, pm=pm_list)
return done, next_state, output
@classmethod
def _find_min_cfb(cls, phase_margin, results):
axis_names = ['corner', 'cfb']
corner_list = results['corner']
corner_sort_arg = np.argsort(corner_list) # type: Sequence[int]
# rearrange array axis
sweep_vars = results['sweep_params']['pm_vout']
order = [sweep_vars.index(name) for name in axis_names]
pm_data = np.transpose(results['pm_vout'], axes=order)
# determine minimum cfb
cfb_vec = results['cfb']
cfb_idx_min = 0
for corner_idx in corner_sort_arg:
bin_iter = BinaryIterator(cfb_idx_min, cfb_vec.size)
while bin_iter.has_next():
cur_cfb_idx = bin_iter.get_next()
pm = pm_data[corner_idx, cur_cfb_idx]
if pm >= phase_margin:
bin_iter.save()
bin_iter.down()
else:
bin_iter.up()
cfb_idx_min = bin_iter.get_last_save()
if cfb_idx_min is None:
# No solution; cannot make amplifier stable
break
if cfb_idx_min is None:
raise ValueError('Cannot determine cfb.')
else:
cfb = cfb_vec[cfb_idx_min]
return cfb.item()
| # -*- coding: utf-8 -*-
"""This module contains design algorithm for a traditional two stage operational amplifier."""
from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Sequence
from copy import deepcopy
import numpy as np
import scipy.optimize as sciopt
from bag.math import gcd
from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db
from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden
from bag.simulation.core import MeasurementManager
from verification.mos.query import MOSDBDiscrete
from .components import LoadDiodePFB, InputGm
if TYPE_CHECKING:
from verification.ac.core import ACTB
class TailStage1(object):
"""Tail transistor of the first stage op amp.
Due to layout restrictions, the tail transistor needs to have the same number of fingers
and stack number as the input transistor. This method finds the optimal width/intent.
"""
def __init__(self, mos_db):
# type: (MOSDBDiscrete) -> None
self._db = mos_db
self._intent_list = mos_db.get_dsn_param_values('intent')
self._valid_widths = mos_db.width_list
self._best_op = None
def design(self,
itarg_list, # type: List[float]
vd_list, # type: List[float]
vout_amp_list, # type: List[float]
vb, # type: float
l, # type: float
seg, # type: int
stack, # type: int
):
# type: (...) -> None
vgs_idx = self._db.get_fun_arg_index('vgs')
self._best_op = best_score = None
for intent in self._intent_list:
for w in self._valid_widths:
self._db.set_dsn_params(l=l, w=w, intent=intent, stack=stack)
ib = self._db.get_function_list('ibias')
gds = self._db.get_function_list('gds')
vgs_min, vgs_max = ib[0].get_input_range(vgs_idx)
vg_min = vgs_min + vb
vg_max = vgs_max + vb
# find vgs for each corner
vgs_list, gds1_list, gds2_list = self._solve_vgs(itarg_list, vout_amp_list, vd_list,
ib, gds, seg, vb, vg_min, vg_max)
if vgs_list is not None:
cur_score = max(gds2_list)
if self._best_op is None or cur_score < best_score:
best_score = cur_score
self._best_op = (w, intent, seg, stack, vb, vgs_list, vout_amp_list,
gds1_list, gds2_list)
def _solve_vgs(self, itarg_list, vout_list, vd_list, ib_list, gds_list, seg, vb, vg_min,
vg_max):
vgs_list, gds1_list, gds2_list = [], [], []
for itarg, vout, vd, ibf, gdsf in zip(itarg_list, vout_list, vd_list, ib_list, gds_list):
def zero_fun(vg):
farg = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vg - vb)
return seg * ibf(farg) - itarg
v1, v2 = zero_fun(vg_min), zero_fun(vg_max)
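            # a root exists in [vg_min, vg_max] only if zero_fun changes sign across the bracket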
if v1 < 0 and v2 < 0 or v1 > 0 and v2 > 0:
# no solution
return None, None, None
vg_sol = sciopt.brentq(zero_fun, vg_min, vg_max) # type: float
vgs_opt = vg_sol - vb
arg1 = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vgs_opt)
arg2 = self._db.get_fun_arg(vbs=vb - vd, vds=vout - vb, vgs=vgs_opt)
vgs_list.append(vgs_opt)
gds1_list.append(seg * gdsf(arg1))
gds2_list.append(seg * gdsf(arg2))
return vgs_list, gds1_list, gds2_list
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
if self._best_op is None:
return None
w, intent, seg, stack, vb, vgs_list, vout_list, gds1_list, gds2_list = self._best_op
self._db.set_dsn_params(w=w, intent=intent, stack=stack)
cdd = self._db.get_function_list('cdd')
cdd2_list = []
for vgs, vout, cddf in zip(vgs_list, vout_list, cdd):
arg = self._db.get_fun_arg(vbs=0, vds=vout - vb, vgs=vgs)
cur_cdd = cddf(arg) # type: float
cdd2_list.append(seg * cur_cdd)
return dict(
w=w,
intent=intent,
vgs=vgs_list,
gds1=gds1_list,
gds2=gds2_list,
cdd2=cdd2_list,
)
class StageOneCurrentError(Exception):
pass
class OpAmpTwoStage(object):
"""A two stage fully differential operational amplifier.
The first stage is a differential amplifier with diode + positive feedback load, the
second stage is a psuedo-differential common source amplifier.
This topology has the following advantages:
1. large output swing.
2. Common mode feedback is only required for the second stage.
"""
def __init__(self, nch_db, pch_db):
# type: (MOSDBDiscrete, MOSDBDiscrete) -> None
self._nch_db = nch_db
self._pch_db = pch_db
self._amp_info = None
def design(self,
i1_unit, # type: List[float]
i1_min_size, # type: int
vg_list, # type: List[float]
vout_list, # type: List[float]
cpar1, # type: float
cload, # type: float
f_unit, # type: float
phase_margin, # type: float
res_var, # type: float
l, # type: float
vstar_gm_min, # type: float
ft_load_scale, # type: float
vds_tail_min, # type: float
seg_gm_min, # type: int
vdd, # type: float
pmos_input=True, # type: bool
max_ref_ratio=20, # type: int
load_stack_list=None, # type: Optional[List[int]]
):
# type: (...) -> None
# binary search for minimum stage 1 current,
i1_size_iter = BinaryIterator(i1_min_size, None)
i1_size_opt, opt_info = None, None
while i1_size_iter.has_next():
i1_size = i1_size_iter.get_next()
print('trying i1_size = %d' % i1_size)
try:
self._design_with_itarg(i1_size, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
success = True
except StageOneCurrentError as err:
print(err)
success = False
if success:
print('success')
opt_info = self._amp_info
i1_size_opt = i1_size
i1_size_iter.down()
else:
i1_size_iter.up()
# linear search to find optimal scale2
scale2_int_max = int(opt_info['scale2'])
if scale2_int_max == opt_info['scale2']:
scale2_int_max -= 1
last_i1_size = i1_size_opt
print('i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
for scale2_test in range(scale2_int_max, 0, -1):
i1_size_test = int(np.floor(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test)))
if i1_size_test <= last_i1_size or scale2_test == opt_info['scale2']:
continue
print('testing i1_size = %d, scale2 = %.4g' % (i1_size_test, scale2_test))
try:
self._design_with_itarg(i1_size_test, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
except StageOneCurrentError as err:
print(err)
continue
if self._amp_info['scale2'] <= scale2_test:
# found new minimum. close in to find optimal i1 size
opt_info = self._amp_info
i1_size_opt = i1_size_test
print('update: i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter = BinaryIterator(last_i1_size + 1, i1_size_test)
while i1_size_iter.has_next():
i1_size_cur_opt = i1_size_iter.get_next()
print('testing i1_size = %d' % i1_size_cur_opt)
try:
self._design_with_itarg(i1_size_cur_opt, i1_unit, vg_list, vout_list, cpar1,
cload, f_unit, phase_margin, res_var, l,
vstar_gm_min, ft_load_scale, vds_tail_min,
seg_gm_min, vdd, pmos_input, max_ref_ratio,
load_stack_list)
if self._amp_info['scale2'] <= opt_info['scale2']:
opt_info = self._amp_info
i1_size_opt = i1_size_cur_opt
print('update: i1_size = %d, '
'scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter.down()
else:
i1_size_iter.up()
except StageOneCurrentError as err:
print(err)
i1_size_iter.up()
last_i1_size = i1_size_test
self._amp_info = opt_info
def _design_with_itarg(self,
i1_size, # type: int
i1_unit, # type: List[float]
vg_list, # type: List[float]
vout_list, # type: List[float]
cpar1, # type: float
cload, # type: float
f_unit, # type: float
phase_margin, # type: float
res_var, # type: float
l, # type: float
vstar_gm_min, # type: float
ft_load_scale, # type: float
vds_tail_min, # type: float
seg_gm_min, # type: int
vdd, # type: float
pmos_input, # type: bool
max_ref_ratio, # type: int
load_stack_list, # type: Optional[List[int]]
):
# type: (...) -> None
itarg_list = [i1 * i1_size for i1 in i1_unit]
if pmos_input:
load_db = self._nch_db
gm_db = self._pch_db
vds2_list = vout_list
vb_gm = vdd
vb_load = 0
else:
load_db = self._pch_db
gm_db = self._nch_db
vds2_list = [vo - vdd for vo in vout_list]
vb_gm = 0
vb_load = vdd
load = LoadDiodePFB(load_db)
gm = InputGm(gm_db)
tail1 = TailStage1(gm_db)
# design load
print('designing load')
load.design(itarg_list, vds2_list, ft_load_scale * f_unit, stack_list=load_stack_list)
load_info = load.get_dsn_info()
vgs_load_list = load_info['vgs']
gds_load_list = load_info['gds1']
gm2_list = load_info['gm2']
stack_diode = load_info['stack_diode']
stack_ngm = load_info['stack_ngm']
seg_diode = load_info['seg_diode']
seg_ngm = load_info['seg_ngm']
if pmos_input:
vmid_list = vgs_load_list
else:
vmid_list = [vdd - vgs for vgs in vgs_load_list]
# design input gm
print('designing input gm')
gm.design(itarg_list, vg_list, vmid_list, gds_load_list, vb_gm, vstar_gm_min, vds_tail_min,
seg_min=seg_gm_min, stack_list=[stack_ngm])
gm_info = gm.get_dsn_info()
gm1_list = gm_info['gm']
gds_in_list = gm_info['gds']
vtail_list = gm_info['vs']
seg_gm = gm_info['seg']
stack_gm = gm_info['stack']
gds1_list = [gds_in + gds_load for gds_in, gds_load in zip(gds_in_list, gds_load_list)]
gain1_list = [gm1 / gds1 for gm1, gds1 in zip(gm1_list, gds1_list)]
# design stage 1 tail
print('designing tail')
tail1.design(itarg_list, vtail_list, vout_list, vb_gm, l, seg_gm, stack_gm)
tail1_info = tail1.get_dsn_info()
vbias_list = [vgs_tail + vb_gm for vgs_tail in tail1_info['vgs']]
# design stage 2 gm
w_dict = {'load': load_info['w'], 'in': gm_info['w'], 'tail': tail1_info['w']}
th_dict = {'load': load_info['intent'], 'in': gm_info['intent'],
'tail': tail1_info['intent']}
stack_dict = {'tail': stack_gm, 'in': stack_gm, 'diode': stack_diode, 'ngm': stack_ngm}
seg_dict = {'tail1': seg_gm,
'in': seg_gm,
'diode1': seg_diode,
'ngm1': seg_ngm,
}
print('designing stage 2')
stage2_results = self._design_stage2(gm_db, load_db, vtail_list, vg_list, vmid_list,
vout_list, vbias_list, vb_gm, vb_load, cload, cpar1,
w_dict, th_dict, stack_dict, seg_dict, gm2_list,
res_var, phase_margin, f_unit, max_ref_ratio)
scale2 = seg_dict['diode2'] / seg_dict['diode1']
scaler = seg_dict['ref'] / seg_dict['tail1']
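        # total supply current per corner: stage-1 and scaled stage-2 branches plus the reference branch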
itot_list = [(2 * (1 + scale2) + scaler) * itarg for itarg in itarg_list]
layout_info = dict(
w_dict=w_dict,
th_dict=th_dict,
stack_dict=stack_dict,
seg_dict=seg_dict,
)
self._amp_info = dict(
i1_size=i1_size,
scale2=scale2,
scaler=scaler,
vtail=vtail_list,
vmid=vmid_list,
vbias=vbias_list,
itot=itot_list,
vstar=gm_info['vstar'],
cin=gm_info['cgg'],
gm1=gm1_list,
gds1=gds1_list,
gain1=gain1_list,
rfb=stage2_results['rz'],
cfb=stage2_results['cf'],
gain_tot=stage2_results['gain'],
f_3db=stage2_results['f_3db'],
f_unit=stage2_results['f_unity'],
phase_margin=stage2_results['phase_margin'],
layout_info=layout_info,
)
print('done')
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
return self._amp_info
def get_specs_verification(self, top_specs):
# type: (Dict[str, Any]) -> Dict[str, Any]
top_specs = deepcopy(top_specs)
dsn_specs = top_specs['dsn_specs']
ibias = dsn_specs['i1_unit'][0] * self._amp_info['i1_size'] * self._amp_info['scaler']
vdd = dsn_specs['vdd']
vindc = dsn_specs['vg_list'][0]
voutdc = dsn_specs['vout_list'][0]
f_unit = dsn_specs['f_unit']
gain_max = max(self._amp_info['gain_tot'])
f_bw_log = int(np.floor(np.log10(f_unit / gain_max)))
f_unit_log = int(np.ceil(np.log10(f_unit)))
top_specs['layout_params'].update(self._amp_info['layout_info'])
meas = top_specs['measurements'][0]
meas['cfb'] = self._amp_info['cfb']
meas['rfb'] = self._amp_info['rfb']
ac_tb = meas['testbenches']['ac']
ac_tb['fstart'] = 10 ** (f_bw_log - 1)
ac_tb['fstop'] = 10 ** (f_unit_log + 1)
ac_sim_vars = ac_tb['sim_vars']
ac_sim_vars['vdd'] = vdd
ac_sim_vars['cload'] = dsn_specs['cload']
ac_sim_vars['vincm'] = vindc
ac_sim_vars['voutcm'] = voutdc
ac_sim_vars['ibias'] = ibias
ac_sim_vars['vdd'] = vdd
ac_sim_vars['vinac'] = 1.0
ac_sim_vars['vindc'] = 0.0
"""
top_specs['tb_dc']['tb_params']['vimax'] = vdd
top_specs['tb_dc']['tb_params']['vimin'] = -vdd
top_specs['tb_dc']['tb_params']['vindc'] = vindc
top_specs['tb_dc']['tb_params']['voutcm'] = voutdc
top_specs['tb_dc']['tb_params']['ibias'] = ibias
top_specs['tb_dc']['tb_params']['vdd'] = vdd
top_specs['tb_dc']['tb_params']['voutref'] = voutdc
top_specs['tb_dc']['tb_params']['vout_start'] = -vdd + 0.15
top_specs['tb_dc']['tb_params']['vout_stop'] = vdd - 0.15
"""
return top_specs
def _design_stage2(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gm2_list, res_var, phase_margin, f_unit, max_ref_ratio):
seg_tail1 = seg_dict['tail1']
seg_diode1 = seg_dict['diode1']
seg_ngm1 = seg_dict['ngm1']
# step 1: find stage 2 unit size
seg_gcd = gcd(gcd(seg_tail1, seg_diode1), seg_ngm1)
if seg_gcd % 2 != 0:
raise ValueError('All segment numbers must be even.')
# divide seg_gcd by 2 to make sure all generated segment numbers are even
seg_gcd //= 2
# make sure we have enough tail fingers for common mode feedback
min_size = 2 if seg_tail1 // seg_gcd == 2 else 1
def ac_results_fun(cur_size):
seg_dict['tail2'] = seg_tail1 // seg_gcd * cur_size
seg_dict['diode2'] = seg_diode1 // seg_gcd * cur_size
seg_dict['ngm2'] = seg_ngm1 // seg_gcd * cur_size
cur_scale2 = cur_size / seg_gcd
cur_gm2_list = [gm2 * cur_scale2 for gm2 in gm2_list]
ac_results = self._find_rz_cf(gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list,
vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict,
stack_dict, seg_dict, cur_gm2_list, res_var, phase_margin)
return ac_results
def funity_fun(cur_size):
ac_results_tmp = ac_results_fun(cur_size)
fu_list = ac_results_tmp[0]
if fu_list is None:
return -1
# noinspection PyTypeChecker
ans = min(fu_list)
return ans
# find min_size such that amplifier is stable
min_bin_iter = BinaryIterator(min_size, None)
while min_bin_iter.has_next():
test_size = min_bin_iter.get_next()
test_fu = funity_fun(test_size)
if test_fu >= 0:
min_bin_iter.save()
min_bin_iter.down()
else:
min_bin_iter.up()
min_result = minimize_cost_golden(funity_fun, f_unit, offset=min_bin_iter.get_last_save())
if min_result.x is None:
msg = 'Insufficient stage 1 current. funity_max=%.4g'
raise StageOneCurrentError(msg % min_result.vmax)
funity_list, rz_nom, cf_min, gain_list, f3db_list, pm_list = ac_results_fun(min_result.x)
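        # split the stage-2 tail segments between the main tail and the common-mode feedback device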
seg_tail2_tot = seg_dict['tail2']
seg_tail2 = (seg_tail2_tot // 4) * 2
seg_tailcm = seg_tail2_tot - seg_tail2
seg_tail_tot = 2 * (seg_dict['tail1'] + seg_tail2)
seg_dict['tail2'] = seg_tail2
seg_dict['tailcm'] = seg_tailcm
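        # 'ref' segments: ceil(seg_tail_tot / max_ref_ratio), rounded up to an even count, minimum 2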
seg_dict['ref'] = max(2, -((-seg_tail_tot // max_ref_ratio) // 2) * 2)
return dict(
rz=rz_nom,
cf=cf_min,
gain=gain_list,
f_3db=f3db_list,
f_unity=funity_list,
phase_margin=pm_list,
)
@classmethod
def _get_stage2_ss(cls, gm2_list, gds2_list, c2_list, cg2_list, cload, seg_gcd, cur_size):
cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list = [], [], [], []
for gm2, gds2, c2, cg2 in zip(gm2_list, gds2_list, c2_list, cg2_list):
cur_gm2_list.append(gm2 * cur_size / seg_gcd)
cur_gds2_list.append(gds2 * cur_size / seg_gcd)
cur_c2_list.append(cload + c2 * cur_size / seg_gcd)
cur_cg2_list.append(cg2 * cur_size / seg_gcd)
return cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list
def _find_rz_cf(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gm2_list, res_var, phase_margin, cap_tol=1e-15, cap_step=10e-15, cap_min=1e-15,
cap_max=1e-9):
"""Find minimum miller cap that stabilizes the system.
NOTE: This function assume phase of system for any miller cap value will not loop
around 360, otherwise it may get the phase margin wrong. This assumption should be valid
for this op amp.
"""
gz_worst = float(min(gm2_list))
gz_nom = gz_worst * (1 - res_var)
# find maximum Cf needed to stabilize all corners
cf_min = cap_min
for env_idx, (vtail, vg, vmid, vout, vbias) in \
enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gz_worst)
bin_iter = FloatBinaryIterator(cf_min, None, cap_tol, search_step=cap_step)
while bin_iter.has_next():
cur_cf = bin_iter.get_next()
cir.add_cap(cur_cf, 'outp', 'xp')
cir.add_cap(cur_cf, 'outn', 'xn')
num, den = cir.get_num_den('in', 'out')
cur_pm, _ = get_stability_margins(num, den)
if cur_pm < phase_margin:
if cur_cf > cap_max:
# no way to make amplifier stable, just return
return None, None, None, None, None, None
bin_iter.up()
else:
bin_iter.save()
bin_iter.down()
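                # remove the trial compensation caps before testing the next value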
cir.add_cap(-cur_cf, 'outp', 'xp')
cir.add_cap(-cur_cf, 'outn', 'xn')
# bin_iter is guaranteed to save at least one value, so don't need to worry about
# cf_min being None
cf_min = bin_iter.get_last_save()
# find gain, unity gain bandwidth, and phase margin across corners
gain_list, f3db_list, funity_list, pm_list = [], [], [], []
for env_idx, (vtail, vg, vmid, vout, vbias) in \
enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
gz_nom)
cir.add_cap(cf_min, 'outp', 'xp')
cir.add_cap(cf_min, 'outn', 'xn')
num, den = cir.get_num_den('in', 'out')
pn = np.poly1d(num)
pd = np.poly1d(den)
gain_list.append(abs(pn(0) / pd(0)))
f3db_list.append(get_w_3db(num, den) / 2 / np.pi)
funity_list.append(get_w_crossings(num, den)[0] / 2 / np.pi)
pm_list.append(get_stability_margins(num, den)[0])
return funity_list, 1 / gz_nom, cf_min, gain_list, f3db_list, pm_list
@classmethod
def _make_circuit(cls, env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load,
cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz, neg_cap=False,
no_fb=False):
cur_env = gm_db.env_list[env_idx]
gm_db.set_dsn_params(w=w_dict['tail'], intent=th_dict['tail'], stack=stack_dict['tail'])
tail1_params = gm_db.query(env=cur_env, vbs=0, vds=vtail - vb_gm, vgs=vbias - vb_gm)
tail2_params = gm_db.query(env=cur_env, vbs=0, vds=vout - vb_gm, vgs=vbias - vb_gm)
gm_db.set_dsn_params(w=w_dict['in'], intent=th_dict['in'], stack=stack_dict['in'])
gm1_params = gm_db.query(env=cur_env, vbs=vb_gm - vtail, vds=vmid - vtail, vgs=vg - vtail)
load_db.set_dsn_params(w=w_dict['load'], intent=th_dict['load'], stack=stack_dict['diode'])
diode1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
diode2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
load_db.set_dsn_params(stack=stack_dict['ngm'])
ngm1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
ngm2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
cir = LTICircuit()
# stage 1
cir.add_transistor(tail1_params, 'tail', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail1'],
neg_cap=neg_cap)
cir.add_transistor(gm1_params, 'midp', 'inn', 'tail', 'gnd', fg=seg_dict['in'],
neg_cap=neg_cap)
cir.add_transistor(gm1_params, 'midn', 'inp', 'tail', 'gnd', fg=seg_dict['in'],
neg_cap=neg_cap)
cir.add_transistor(diode1_params, 'midp', 'midp', 'gnd', 'gnd', fg=seg_dict['diode1'],
neg_cap=neg_cap)
cir.add_transistor(diode1_params, 'midn', 'midn', 'gnd', 'gnd', fg=seg_dict['diode1'],
neg_cap=neg_cap)
cir.add_transistor(ngm1_params, 'midn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm1'],
neg_cap=neg_cap)
cir.add_transistor(ngm1_params, 'midp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm1'],
neg_cap=neg_cap)
# stage 2
cir.add_transistor(tail2_params, 'outp', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
neg_cap=neg_cap)
cir.add_transistor(tail2_params, 'outn', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
neg_cap=neg_cap)
cir.add_transistor(diode2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['diode2'],
neg_cap=neg_cap)
cir.add_transistor(diode2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['diode2'],
neg_cap=neg_cap)
cir.add_transistor(ngm2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm2'],
neg_cap=neg_cap)
cir.add_transistor(ngm2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm2'],
neg_cap=neg_cap)
# parasitic cap
cir.add_cap(cpar1, 'midp', 'gnd')
cir.add_cap(cpar1, 'midn', 'gnd')
# load cap
cir.add_cap(cload, 'outp', 'gnd')
cir.add_cap(cload, 'outn', 'gnd')
# feedback resistors
if not no_fb:
cir.add_conductance(gz, 'xp', 'midn')
cir.add_conductance(gz, 'xn', 'midp')
# diff-to-single conversion
cir.add_vcvs(0.5, 'inp', 'gnd', 'in', 'gnd')
cir.add_vcvs(-0.5, 'inn', 'gnd', 'in', 'gnd')
cir.add_vcvs(1, 'out', 'gnd', 'outp', 'outn')
return cir
class OpAmpTwoStageChar(MeasurementManager):
def __init__(self,
data_dir, # type: str
meas_name, # type: str
impl_lib, # type: str
specs, # type: Dict[str, Any]
wrapper_lookup, # type: Dict[str, str]
sim_view_list, # type: Sequence[Tuple[str, str]]
env_list, # type: Sequence[str]
):
MeasurementManager.__init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup,
sim_view_list, env_list)
def get_initial_state(self):
# type: () -> str
"""Returns the initial FSM state."""
return 'ac0'
def get_testbench_info(self, state, prev_output):
rfb0 = self.specs['rfb']
cfb0 = self.specs['cfb']
find_cfb = self.specs.get('find_cfb', True)
res_var = self.specs['res_var']
cmin_scale = self.specs['cmin_scale']
cmax_scale = self.specs['cmax_scale']
num_pts = self.specs['num_pts']
tmp = super(OpAmpTwoStageChar, self).get_testbench_info('ac', prev_output)
tb_name, tb_type, tb_specs, tb_params = tmp
if state == 'ac0' and find_cfb:
cfb_list = np.linspace(cfb0 * cmin_scale, cfb0 * cmax_scale, num_pts).tolist()
tb_specs['sim_vars']['rfb'] = rfb0 * (1 - res_var)
tb_specs['sim_vars']['cfb'] = cfb_list
else:
if find_cfb:
cfb = self.get_state_output('ac0')['cfb']
else:
cfb = cfb0
tb_specs['sim_vars']['rfb'] = rfb0
tb_specs['sim_vars']['cfb'] = cfb
return tb_name, tb_type, tb_specs, tb_params
def process_output(self, state, data, tb_manager):
# type: (str, Dict[str, Any], ACTB) -> Tuple[bool, str, Dict[str, Any]]
phase_margin = self.specs['phase_margin']
find_cfb = self.specs.get('find_cfb', True)
output_list = ['vout']
results = tb_manager.get_ugb_and_pm(data, output_list)
if state == 'ac0' and find_cfb:
done = False
next_state = 'ac1'
cfb = self._find_min_cfb(phase_margin, results)
output = dict(cfb=cfb)
else:
done = True
next_state = ''
if find_cfb:
cfb = self.get_state_output('ac0')['cfb']
else:
cfb = self.specs['cfb']
gain_results = tb_manager.get_gain_and_w3db(data, output_list, output_dict=results)
corner_list = results['corner'].tolist()
gain_list = gain_results['gain_vout'].tolist()
bw_list = gain_results['w3db_vout'].tolist()
funity_list = results['funity_vout'].tolist()
pm_list = results['pm_vout'].tolist()
output = dict(cfb=cfb, corners=corner_list, gain=gain_list, bw=bw_list,
funity=funity_list, pm=pm_list)
return done, next_state, output
@classmethod
def _find_min_cfb(cls, phase_margin, results):
axis_names = ['corner', 'cfb']
corner_list = results['corner']
corner_sort_arg = np.argsort(corner_list) # type: Sequence[int]
# rearrange array axis
sweep_vars = results['sweep_params']['pm_vout']
order = [sweep_vars.index(name) for name in axis_names]
pm_data = np.transpose(results['pm_vout'], axes=order)
# determine minimum cfb
cfb_vec = results['cfb']
cfb_idx_min = 0
for corner_idx in corner_sort_arg:
bin_iter = BinaryIterator(cfb_idx_min, cfb_vec.size)
while bin_iter.has_next():
cur_cfb_idx = bin_iter.get_next()
pm = pm_data[corner_idx, cur_cfb_idx]
if pm >= phase_margin:
bin_iter.save()
bin_iter.down()
else:
bin_iter.up()
cfb_idx_min = bin_iter.get_last_save()
if cfb_idx_min is None:
# No solution; cannot make amplifier stable
break
if cfb_idx_min is None:
raise ValueError('Cannot determine cfb.')
else:
cfb = cfb_vec[cfb_idx_min]
return cfb.item() | en | 0.679913 | # -*- coding: utf-8 -*- This module contains design algorithm for a traditional two stage operational amplifier. Tail transistor of the first stage op amp. Due to layout restrictions, the tail transistor needs to have the same number of fingers and stack number as the input transistor. This method finds the optimal width/intent. # type: (MOSDBDiscrete) -> None # type: List[float] # type: List[float] # type: List[float] # type: float # type: float # type: int # type: int # type: (...) -> None # find vgs for each corner # no solution # type: float # type: () -> Optional[Dict[str, Any]] # type: float A two stage fully differential operational amplifier. The first stage is a differential amplifier with diode + positive feedback load, the second stage is a psuedo-differential common source amplifier. This topology has the following advantages: 1. large output swing. 2. Common mode feedback is only required for the second stage. # type: (MOSDBDiscrete, MOSDBDiscrete) -> None # type: List[float] # type: int # type: List[float] # type: List[float] # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: int # type: float # type: bool # type: int # type: Optional[List[int]] # type: (...) -> None # binary search for minimum stage 1 current, # linear search to find optimal scale2 # found new minimum. close in to find optimal i1 size # type: int # type: List[float] # type: List[float] # type: List[float] # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: int # type: float # type: bool # type: int # type: Optional[List[int]] # type: (...) -> None # design load # design input gm # design stage 1 tail # design stage 2 gm # type: () -> Optional[Dict[str, Any]] # type: (Dict[str, Any]) -> Dict[str, Any] top_specs['tb_dc']['tb_params']['vimax'] = vdd top_specs['tb_dc']['tb_params']['vimin'] = -vdd top_specs['tb_dc']['tb_params']['vindc'] = vindc top_specs['tb_dc']['tb_params']['voutcm'] = voutdc top_specs['tb_dc']['tb_params']['ibias'] = ibias top_specs['tb_dc']['tb_params']['vdd'] = vdd top_specs['tb_dc']['tb_params']['voutref'] = voutdc top_specs['tb_dc']['tb_params']['vout_start'] = -vdd + 0.15 top_specs['tb_dc']['tb_params']['vout_stop'] = vdd - 0.15 # step 1: find stage 2 unit size # divide seg_gcd by 2 to make sure all generated segment numbers are even # make sure we have enough tail fingers for common mode feedback # noinspection PyTypeChecker # find min_size such that amplifier is stable Find minimum miller cap that stabilizes the system. NOTE: This function assume phase of system for any miller cap value will not loop around 360, otherwise it may get the phase margin wrong. This assumption should be valid for this op amp. # find maximum Cf needed to stabilize all corners # no way to make amplifier stable, just return # bin_iter is guaranteed to save at least one value, so don't need to worry about # cf_min being None # find gain, unity gain bandwidth, and phase margin across corners # stage 1 # stage 2 # parasitic cap # load cap # feedback resistors # diff-to-single conversion # type: str # type: str # type: str # type: Dict[str, Any] # type: Dict[str, str] # type: Sequence[Tuple[str, str]] # type: Sequence[str] # type: () -> str Returns the initial FSM state. 
# type: (str, Dict[str, Any], ACTB) -> Tuple[bool, str, Dict[str, Any]] # type: Sequence[int] # rearrange array axis # determine minimum cfb # No solution; cannot make amplifier stable | 2.785902 | 3 |
alipay/aop/api/domain/AlipayMerchantAuthDeleteModel.py | antopen/alipay-sdk-python-all | 0 | 9430 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMerchantAuthDeleteModel(object):
def __init__(self):
self._channel_code = None
self._operator_id = None
self._role = None
self._scene_code = None
self._user_id_list = None
@property
def channel_code(self):
return self._channel_code
@channel_code.setter
def channel_code(self, value):
self._channel_code = value
@property
def operator_id(self):
return self._operator_id
@operator_id.setter
def operator_id(self, value):
self._operator_id = value
@property
def role(self):
return self._role
@role.setter
def role(self, value):
self._role = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def user_id_list(self):
return self._user_id_list
@user_id_list.setter
def user_id_list(self, value):
if isinstance(value, list):
self._user_id_list = list()
for i in value:
self._user_id_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.channel_code:
if hasattr(self.channel_code, 'to_alipay_dict'):
params['channel_code'] = self.channel_code.to_alipay_dict()
else:
params['channel_code'] = self.channel_code
if self.operator_id:
if hasattr(self.operator_id, 'to_alipay_dict'):
params['operator_id'] = self.operator_id.to_alipay_dict()
else:
params['operator_id'] = self.operator_id
if self.role:
if hasattr(self.role, 'to_alipay_dict'):
params['role'] = self.role.to_alipay_dict()
else:
params['role'] = self.role
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.user_id_list:
if isinstance(self.user_id_list, list):
for i in range(0, len(self.user_id_list)):
element = self.user_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.user_id_list[i] = element.to_alipay_dict()
if hasattr(self.user_id_list, 'to_alipay_dict'):
params['user_id_list'] = self.user_id_list.to_alipay_dict()
else:
params['user_id_list'] = self.user_id_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMerchantAuthDeleteModel()
if 'channel_code' in d:
o.channel_code = d['channel_code']
if 'operator_id' in d:
o.operator_id = d['operator_id']
if 'role' in d:
o.role = d['role']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'user_id_list' in d:
o.user_id_list = d['user_id_list']
return o
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMerchantAuthDeleteModel(object):
def __init__(self):
self._channel_code = None
self._operator_id = None
self._role = None
self._scene_code = None
self._user_id_list = None
@property
def channel_code(self):
return self._channel_code
@channel_code.setter
def channel_code(self, value):
self._channel_code = value
@property
def operator_id(self):
return self._operator_id
@operator_id.setter
def operator_id(self, value):
self._operator_id = value
@property
def role(self):
return self._role
@role.setter
def role(self, value):
self._role = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def user_id_list(self):
return self._user_id_list
@user_id_list.setter
def user_id_list(self, value):
if isinstance(value, list):
self._user_id_list = list()
for i in value:
self._user_id_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.channel_code:
if hasattr(self.channel_code, 'to_alipay_dict'):
params['channel_code'] = self.channel_code.to_alipay_dict()
else:
params['channel_code'] = self.channel_code
if self.operator_id:
if hasattr(self.operator_id, 'to_alipay_dict'):
params['operator_id'] = self.operator_id.to_alipay_dict()
else:
params['operator_id'] = self.operator_id
if self.role:
if hasattr(self.role, 'to_alipay_dict'):
params['role'] = self.role.to_alipay_dict()
else:
params['role'] = self.role
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.user_id_list:
if isinstance(self.user_id_list, list):
for i in range(0, len(self.user_id_list)):
element = self.user_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.user_id_list[i] = element.to_alipay_dict()
if hasattr(self.user_id_list, 'to_alipay_dict'):
params['user_id_list'] = self.user_id_list.to_alipay_dict()
else:
params['user_id_list'] = self.user_id_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMerchantAuthDeleteModel()
if 'channel_code' in d:
o.channel_code = d['channel_code']
if 'operator_id' in d:
o.operator_id = d['operator_id']
if 'role' in d:
o.role = d['role']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'user_id_list' in d:
o.user_id_list = d['user_id_list']
return o
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.933715 | 2 |
test/torchaudio_unittest/models/emformer/emformer_cpu_test.py | LaudateCorpus1/audio | 0 | 9431 | import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.emformer.emformer_test_impl import EmformerTestImpl
class EmformerFloat32CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class EmformerFloat64CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
| import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.emformer.emformer_test_impl import EmformerTestImpl
class EmformerFloat32CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class EmformerFloat64CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
| none | 1 | 2.387281 | 2 |
|
src/nba_analysis/pipelines/data_processing/pipeline.py | stanton119/nba-analysis | 0 | 9432 | <reponame>stanton119/nba-analysis
"""
Two pipelines:
* full history
* update latest season
* Only updates latest season year
"""
from functools import partial
import itertools
from kedro.pipeline import Pipeline, node
from nba_analysis.pipelines.data_processing import basketball_reference
from . import nodes
def create_pipeline(**kwargs):
season_range = range(2018, 2021)
download_nodes = [
node(
func=partial(nodes.download_season_data, season=season),
inputs=[],
outputs=f"season_data_{season}",
name=f"download_season_data_{season}_node",
)
for season in season_range
]
# month_range = ['october','november','december','january','february','march','april','may','june','july','august','september']
# download_game_log_nodes = [
# node(
# func=partial(nodes.download_game_log_data, season=season, month=month),
# inputs=[],
# outputs=f"game_log_data_{season}_{month}",
# name=f"download_game_log_data_{season}_{month}_node",
# )
# for season, month in itertools.product(season_range,month_range)
# ]
download_game_log_nodes = [
node(
func=partial(
basketball_reference.get_full_season_game_log, season=season
),
inputs=[],
outputs=f"game_log_data_{season}",
name=f"download_game_log_data_{season}_node",
)
for season in season_range
]
process_game_log_nodes = [
node(
func=basketball_reference.process_df_game_log,
inputs=f"game_log_data_{season}",
outputs=f"game_log_data_{season}_int",
name=f"process_game_log_data_{season}_node",
)
for season in season_range
]
return Pipeline(
[
*download_nodes,
node(
func=nodes.process_season_data,
inputs=[f"season_data_{season}" for season in season_range],
outputs="cleaned_season_data",
name="process_season_data_node",
),
*download_game_log_nodes,
*process_game_log_nodes,
]
)
| """
Two pipelines:
* full history
* update latest season
* Only updates latest season year
"""
from functools import partial
import itertools
from kedro.pipeline import Pipeline, node
from nba_analysis.pipelines.data_processing import basketball_reference
from . import nodes
def create_pipeline(**kwargs):
season_range = range(2018, 2021)
download_nodes = [
node(
func=partial(nodes.download_season_data, season=season),
inputs=[],
outputs=f"season_data_{season}",
name=f"download_season_data_{season}_node",
)
for season in season_range
]
# month_range = ['october','november','december','january','february','march','april','may','june','july','august','september']
# download_game_log_nodes = [
# node(
# func=partial(nodes.download_game_log_data, season=season, month=month),
# inputs=[],
# outputs=f"game_log_data_{season}_{month}",
# name=f"download_game_log_data_{season}_{month}_node",
# )
# for season, month in itertools.product(season_range,month_range)
# ]
download_game_log_nodes = [
node(
func=partial(
basketball_reference.get_full_season_game_log, season=season
),
inputs=[],
outputs=f"game_log_data_{season}",
name=f"download_game_log_data_{season}_node",
)
for season in season_range
]
process_game_log_nodes = [
node(
func=basketball_reference.process_df_game_log,
inputs=f"game_log_data_{season}",
outputs=f"game_log_data_{season}_int",
name=f"process_game_log_data_{season}_node",
)
for season in season_range
]
return Pipeline(
[
*download_nodes,
node(
func=nodes.process_season_data,
inputs=[f"season_data_{season}" for season in season_range],
outputs="cleaned_season_data",
name="process_season_data_node",
),
*download_game_log_nodes,
*process_game_log_nodes,
]
) | en | 0.463724 | Two pipelines: * full history * update latest season * Only updates latest season year # month_range = ['october','november','december','january','february','march','april','may','june','july','august','september'] # download_game_log_nodes = [ # node( # func=partial(nodes.download_game_log_data, season=season, month=month), # inputs=[], # outputs=f"game_log_data_{season}_{month}", # name=f"download_game_log_data_{season}_{month}_node", # ) # for season, month in itertools.product(season_range,month_range) # ] | 2.69307 | 3 |
IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py | baijifeilong/rawsteelp | 0 | 9433 | <filename>IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py
# Created by <EMAIL> at 2022/1/21 17:13
import typing
from IceSpringRealOptional.typingUtils import gg
from PySide2 import QtWidgets, QtCore
from IceSpringMusicPlayer import tt
from IceSpringMusicPlayer.common.pluginMixin import PluginMixin
from IceSpringMusicPlayer.common.pluginWidgetMixin import PluginWidgetMixin
from IceSpringMusicPlayer.tt import Text
class HelloWorldPlugin(QtWidgets.QWidget, PluginMixin, PluginWidgetMixin):
@classmethod
def getPluginName(cls) -> Text:
return tt.HelloWorldPlugin_Name
@classmethod
def getPluginReplacers(cls) -> typing.Dict[Text, typing.Callable[[], PluginWidgetMixin]]:
return {tt.HelloWorldWidget_Name: lambda: cls()}
def __init__(self):
super().__init__()
label = QtWidgets.QLabel("Hello World")
label.setAlignment(gg(QtCore.Qt.AlignmentFlag.AlignCenter))
self.setLayout(QtWidgets.QGridLayout())
self.layout().addWidget(label)
| <filename>IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py
# Created by <EMAIL> at 2022/1/21 17:13
import typing
from IceSpringRealOptional.typingUtils import gg
from PySide2 import QtWidgets, QtCore
from IceSpringMusicPlayer import tt
from IceSpringMusicPlayer.common.pluginMixin import PluginMixin
from IceSpringMusicPlayer.common.pluginWidgetMixin import PluginWidgetMixin
from IceSpringMusicPlayer.tt import Text
class HelloWorldPlugin(QtWidgets.QWidget, PluginMixin, PluginWidgetMixin):
@classmethod
def getPluginName(cls) -> Text:
return tt.HelloWorldPlugin_Name
@classmethod
def getPluginReplacers(cls) -> typing.Dict[Text, typing.Callable[[], PluginWidgetMixin]]:
return {tt.HelloWorldWidget_Name: lambda: cls()}
def __init__(self):
super().__init__()
label = QtWidgets.QLabel("Hello World")
label.setAlignment(gg(QtCore.Qt.AlignmentFlag.AlignCenter))
self.setLayout(QtWidgets.QGridLayout())
self.layout().addWidget(label)
| en | 0.51972 | # Created by <EMAIL> at 2022/1/21 17:13 | 1.926301 | 2 |
SWHT/Ylm.py | 2baOrNot2ba/SWHT | 0 | 9434 | <gh_stars>0
"""
An implementation of spherical harmonics in Python because scipy.special.sph_harm in scipy<=0.13 is very slow
Originally written by <NAME>
https://github.com/scipy/scipy/issues/1280
"""
import numpy as np
def xfact(m):
# computes (2m-1)!!/sqrt((2m)!)
res = 1.
for i in xrange(1, 2*m+1):
if i % 2: res *= i # (2m-1)!!
res /= np.sqrt(i) # sqrt((2m)!)
return res
def lplm_n(l, m, x):
# associated legendre polynomials normalized as in Ylm, from Numerical Recipes 6.7
l,m = int(l),int(m)
assert 0<=m<=l and np.all(np.abs(x)<=1.)
norm = np.sqrt(2. * l + 1.) / np.sqrt(4. * np.pi)
if m == 0:
pmm = norm * np.ones_like(x)
else:
pmm = (-1.)**m * norm * xfact(m) * (1.-x**2.)**(m/2.)
if l == m:
return pmm
pmmp1 = x * pmm * np.sqrt(2.*m+1.)
if l == m+1:
return pmmp1
for ll in xrange(m+2, l+1):
pll = (x*(2.*ll-1.)*pmmp1 - np.sqrt( (ll-1.)**2. - m**2.)*pmm)/np.sqrt(ll**2.-m**2.)
pmm = pmmp1
pmmp1 = pll
return pll
def Ylm(l, m, phi, theta):
# spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.exp(1J * m * phi)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.exp(1J * m * phi)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
def Ylmr(l, m, phi, theta):
# real spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.cos(m * phi) * np.sqrt(2.)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.sin(-m * phi) * np.sqrt(2.)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
if __name__ == "__main__":
from scipy.special import sph_harm
from scipy.misc import factorial2, factorial
from timeit import Timer
def ref_xfact(m):
return factorial2(2*m-1)/np.sqrt(factorial(2*m))
print "Time: xfact(10)", Timer("xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(10)", Timer("ref_xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: xfact(80)", Timer("xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(80)", Timer("ref_xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "m", "xfact", "ref_xfact"
for m in range(10) + range(80,90):
a = xfact(m)
b = ref_xfact(m)
print m, a, b
phi, theta = np.ogrid[0:2*np.pi:10j,-np.pi/2:np.pi/2:10j]
print "Time: Ylm(1,1,phi,theta)", Timer("Ylm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "Time: sph_harm(1,1,phi,theta)", Timer("sph_harm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "l", "m", "max|Ylm-sph_harm|"
for l in xrange(0,10):
for m in xrange(-l,l+1):
a = Ylm(l,m,phi,theta)
b = sph_harm(m,l,phi,theta)
print l,m, np.amax(np.abs(a-b))
| """
An implementation of spherical harmonics in Python because scipy.special.sph_harm in scipy<=0.13 is very slow
Originally written by <NAME>
https://github.com/scipy/scipy/issues/1280
"""
import numpy as np
def xfact(m):
# computes (2m-1)!!/sqrt((2m)!)
res = 1.
for i in xrange(1, 2*m+1):
if i % 2: res *= i # (2m-1)!!
res /= np.sqrt(i) # sqrt((2m)!)
return res
def lplm_n(l, m, x):
# associated legendre polynomials normalized as in Ylm, from Numerical Recipes 6.7
l,m = int(l),int(m)
assert 0<=m<=l and np.all(np.abs(x)<=1.)
norm = np.sqrt(2. * l + 1.) / np.sqrt(4. * np.pi)
if m == 0:
pmm = norm * np.ones_like(x)
else:
pmm = (-1.)**m * norm * xfact(m) * (1.-x**2.)**(m/2.)
if l == m:
return pmm
pmmp1 = x * pmm * np.sqrt(2.*m+1.)
if l == m+1:
return pmmp1
for ll in xrange(m+2, l+1):
pll = (x*(2.*ll-1.)*pmmp1 - np.sqrt( (ll-1.)**2. - m**2.)*pmm)/np.sqrt(ll**2.-m**2.)
pmm = pmmp1
pmmp1 = pll
return pll
def Ylm(l, m, phi, theta):
# spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.exp(1J * m * phi)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.exp(1J * m * phi)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
def Ylmr(l, m, phi, theta):
# real spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.cos(m * phi) * np.sqrt(2.)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.sin(-m * phi) * np.sqrt(2.)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
if __name__ == "__main__":
from scipy.special import sph_harm
from scipy.misc import factorial2, factorial
from timeit import Timer
def ref_xfact(m):
return factorial2(2*m-1)/np.sqrt(factorial(2*m))
print "Time: xfact(10)", Timer("xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(10)", Timer("ref_xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: xfact(80)", Timer("xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(80)", Timer("ref_xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "m", "xfact", "ref_xfact"
for m in range(10) + range(80,90):
a = xfact(m)
b = ref_xfact(m)
print m, a, b
phi, theta = np.ogrid[0:2*np.pi:10j,-np.pi/2:np.pi/2:10j]
print "Time: Ylm(1,1,phi,theta)", Timer("Ylm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "Time: sph_harm(1,1,phi,theta)", Timer("sph_harm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "l", "m", "max|Ylm-sph_harm|"
for l in xrange(0,10):
for m in xrange(-l,l+1):
a = Ylm(l,m,phi,theta)
b = sph_harm(m,l,phi,theta)
print l,m, np.amax(np.abs(a-b)) | en | 0.881431 | An implementation on spherical harmonics in python becasue scipy.special.sph_harm in scipy<=0.13 is very slow Originally written by <NAME> https://github.com/scipy/scipy/issues/1280 # computes (2m-1)!!/sqrt((2m)!) # (2m-1)!! # sqrt((2m)!) # associated legendre polynomials normalized as in Ylm, from Numerical Recipes 6.7 # spherical harmonics # theta is from 0 to pi with pi/2 on equator # real spherical harmonics # theta is from 0 to pi with pi/2 on equator | 2.771571 | 3 |
0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | 1 | 9435 | <filename>0673.GCBA-HOTEL_STAFF.py
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
import datetime
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[10]:
url1 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2020/11/Eoh_PnoA_0811.xlsx"
df1 = pd.read_excel(url1)
df1[:2] = df1[:2].ffill(1)
df1.columns = "Personal No Asalariado - " + df1.iloc[1] + " - " + df1.iloc[2]
df1 = df1.drop(df1.columns[[1]], axis = 1)
df1 = df1.drop(index=1)
df1 = df1.drop(index=0)
df1 = df1.drop(index=2)
df1 = df1.dropna(subset = [df1.columns[3]])
#df1 = df1.iloc[2: , 3:-2]
#df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()]
df1 = df1[df1.columns.dropna()]
df1.index = pd.date_range(start='1/1/2008', periods=len(df1), freq = "QS")
df1.index.name = "Date"
#df1 = df1[df1.columns.drop(list(df1.filter(regex='Participación')))]
df1
# In[11]:
url2 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2018/05/Eoh_PA_0811.xlsx"
df2 = pd.read_excel(url2)
df2[:2] = df2[:2].ffill(1)
df2.columns = "Personal Asalariado - " + df2.iloc[1] + " - " + df2.iloc[2]
df2 = df2.drop(df2.columns[[1]], axis = 1)
df2 = df2.drop(index=1)
df2 = df2.drop(index=0)
df2 = df2.drop(index=2)
df2 = df2.dropna(subset = [df2.columns[3]])
#df2 = df2.iloc[2: , 3:-2]
#df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()]
df2 = df2[df2.columns.dropna()]
df2.index = pd.date_range(start='1/1/2008', periods=len(df2), freq = "QS")
df2.index.name = "Date"
df3 = df1.merge(df2, right_index=True, left_index=True)
alphacast.datasets.dataset(7432).upload_data_from_df(df3,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
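# --- Added illustrative sketch; not part of the original script ---
# The two cleaning blocks above repeat the same steps for each spreadsheet; a
# hypothetical helper like the one below could express that pattern once. The
# function name, the prefix argument and the start date are assumptions based
# on the code above, not part of the original notebook export.
def load_eoh_sheet(url, prefix, start='1/1/2008'):
    df = pd.read_excel(url)
    df[:2] = df[:2].ffill(1)
    df.columns = prefix + " - " + df.iloc[1] + " - " + df.iloc[2]
    df = df.drop(df.columns[[1]], axis=1)
    df = df.drop(index=[0, 1, 2])
    df = df.dropna(subset=[df.columns[3]])
    df = df[df.columns.dropna()]
    df.index = pd.date_range(start=start, periods=len(df), freq="QS")
    df.index.name = "Date"
    return df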
| <filename>0673.GCBA-HOTEL_STAFF.py
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
import datetime
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[10]:
url1 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2020/11/Eoh_PnoA_0811.xlsx"
df1 = pd.read_excel(url1)
df1[:2] = df1[:2].ffill(1)
df1.columns = "Personal No Asalariado - " + df1.iloc[1] + " - " + df1.iloc[2]
df1 = df1.drop(df1.columns[[1]], axis = 1)
df1 = df1.drop(index=1)
df1 = df1.drop(index=0)
df1 = df1.drop(index=2)
df1 = df1.dropna(subset = [df1.columns[3]])
#df1 = df1.iloc[2: , 3:-2]
#df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()]
df1 = df1[df1.columns.dropna()]
df1.index = pd.date_range(start='1/1/2008', periods=len(df1), freq = "QS")
df1.index.name = "Date"
#df1 = df1[df1.columns.drop(list(df1.filter(regex='Participación')))]
df1
# In[11]:
url2 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2018/05/Eoh_PA_0811.xlsx"
df2 = pd.read_excel(url2)
df2[:2] = df2[:2].ffill(1)
df2.columns = "Personal Asalariado - " + df2.iloc[1] + " - " + df2.iloc[2]
df2 = df2.drop(df2.columns[[1]], axis = 1)
df2 = df2.drop(index=1)
df2 = df2.drop(index=0)
df2 = df2.drop(index=2)
df2 = df2.dropna(subset = [df2.columns[3]])
#df2 = df2.iloc[2: , 3:-2]
#df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()]
df2 = df2[df2.columns.dropna()]
df2.index = pd.date_range(start='1/1/2008', periods=len(df2), freq = "QS")
df2.index.name = "Date"
df3 = df1.merge(df2, right_index=True, left_index=True)
alphacast.datasets.dataset(7432).upload_data_from_df(df3,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
| en | 0.12904 | #!/usr/bin/env python # coding: utf-8 # In[9]: # In[10]: #df1 = df1.iloc[2: , 3:-2] #df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()] #df1 = df1[df1.columns.drop(list(df1.filter(regex='Participación')))] # In[11]: #df2 = df2.iloc[2: , 3:-2] #df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()] | 2.64759 | 3 |
simpleGmatch4py.py | aravi11/approxGed | 0 | 9436 | <reponame>aravi11/approxGed
# import the GED using the munkres algorithm
import gmatch4py as gm
import networkx as nx
import collections
import csv
import pickle
from collections import OrderedDict
import json
import concurrent.futures as cf
import time
iter = 0
def getFinishedStatus():
    global iter  # needed so the module-level counter above can be updated
    iter += 1
    print('*******\t' + str(iter) + "\t*******")
def getGraphDiff(files):
dotFile_data_path = './DotFiles/'
file1 = files.split(',')[0]
file2 = files.split(',')[1]
g1_name = file1.split('.')[0] # gets the name of first dotFile without its extension
g2_name = file2.split('.')[0] # gets the name of second dotFile without its extension
#print("\n Started pair: "+ str(g1_name) + ', ' + str(g2_name))
graph_1 = nx.drawing.nx_pydot.read_dot(str(dotFile_data_path) + str(file1))
graph_2 = nx.drawing.nx_pydot.read_dot(str(dotFile_data_path) + str(file2))
jsonData = getJsonData(graph_1, graph_2)
dumpJson(jsonData, g1_name, g2_name)
#print("\n >>>Finished pair: "+ str(g1_name) + ', ' + str(g2_name))
#getFinishedStatus()
#print('Total time : '+str(totalTime)+ '\n')
'''
def runParallelCode(pairList):
with cf.ProcessPoolExecutor(max_workers =2) as executor:
try:
for future in cf.as_completed((executor.map(getGraphDiff, pairList, timeout=5000000)), timeout=5000000):
print(str(type(future.result())))
if str(type(future.result())) == "<class 'NoneType'>":
pass
else:
print(future.result(timeout=5000000))
except cf._base.TimeoutError:
print("Time limit exceeded")
pass
'''
def runParallelCode(pairList):
with cf.ProcessPoolExecutor(max_workers =2) as executor:
try:
result = executor.map(getGraphDiff, pairList, timeout=5000000)
for r in result:
if str(type(r)) == "<class 'NoneType'>":
pass
else:
print(r)
except cf._base.TimeoutError:
print("Time limit exceeded")
pass
def getJsonData(graph_1,graph_2):
g1_edgeList = []
g2_edgeList = []
# convert the node labels which are strings to sorted integers without affecting the node attributes.
sortedIntGraph_1 = nx.relabel.convert_node_labels_to_integers(graph_1, first_label=0, ordering='sorted', label_attribute=None)
sortedIntGraph_2 = nx.relabel.convert_node_labels_to_integers(graph_2, first_label=0, ordering='sorted', label_attribute=None)
g1_edgeTuple = list(sortedIntGraph_1.edges(data=False))
g2_edgeTuple = list(sortedIntGraph_2.edges(data=False))
# get graph edge lists
for i in g1_edgeTuple:
g1_edgeList.append(list(i))
for i in g2_edgeTuple:
g2_edgeList.append(list(i))
# get graph attributes in the ascending order as the node labels
nodeLabelList_g1 = []
nodeLabelList_g2 = []
nodeList_g1 = list(sortedIntGraph_1.nodes(data=True))
nodeList_g2 = list(sortedIntGraph_2.nodes(data=True))
for i in range(len(nodeList_g1)):
if nodeList_g1[i][0] == i:
nodeLabelList_g1.insert(i, nodeList_g1[i][1].get('label').replace('"', ''))
for i in range(len(nodeList_g2)):
if nodeList_g2[i][0] == i:
nodeLabelList_g2.insert(i, nodeList_g2[i][1].get('label').replace('"', ''))
# get graph edit distance
#ged = nx.graph_edit_distance(sortedIntGraph_1, sortedIntGraph_2, node_match=return_eq) Commented since its too time expensive
#Gmatch4py code for calculating ged
#abs_ged = gm.BP_2(1,1,1,1)
ged=gm.GraphEditDistance(1,1,1,1) # all edit costs are equal to 1
#hed = gm.HED(1,1,1,1)
result = ged.compare([sortedIntGraph_1, sortedIntGraph_2], None)
# generate the json files
jsonDict = {}
jsonDict["graph_1"] = g1_edgeList
jsonDict["graph_2"] = g2_edgeList
jsonDict["labels_1"] = nodeLabelList_g1
jsonDict["labels_2"] = nodeLabelList_g2
jsonDict["ged"] = int(result[0][1])
#print(jsonDict)
return jsonDict
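# --- Added illustrative sketch; not part of the original script ---
# Minimal example of the comparison step used in getJsonData() above, assuming
# the gmatch4py and networkx imports at the top of the file. The two toy graphs
# are made up for illustration only.
def _toy_ged_example():
    g1 = nx.path_graph(3)   # 0-1-2
    g2 = nx.cycle_graph(3)  # triangle
    ged = gm.GraphEditDistance(1, 1, 1, 1)  # all edit costs equal to 1
    result = ged.compare([g1, g2], None)
    return int(result[0][1])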
def return_eq(node1, node2): #function to compare the node labels
return node1['label']==node2['label']
def dumpJson(jsonFile, g1, g2): #function to dump the Json files
outPath = './outFiles/'
with open(str(outPath)+ str(g1) + '::::'+ str(g2) + '.json', 'w') as fp:
json.dump(jsonFile, fp)
def main(): #main function from where the program starts
dotFileList= []
#dotFile_data_path = './DotFiles/test'
with open('./filenames.txt', 'r') as csvFile:
reader = csv.reader(csvFile)
for row in reader:
dotName = str(row).replace('[', '').replace(']','').replace("'","").strip()
dotFileList.append(dotName)
print("Total number of graph files: " + str(len(dotFileList)))
counter = 0
len_dotFileList = len(dotFileList)
totalGraphJsons = len_dotFileList * len_dotFileList #total number of graph similarity json samples
print("Total Graph Similarity json samples: " + str(int(totalGraphJsons)))
pairList = []
#Code for generating graph Similarity json. Takes a non-symmetric pair of graphs from a list and returns their json data
for dotFile_i in dotFileList:
for dotFile_j in dotFileList:
pairList.append(str(dotFile_i + ','+ str(dotFile_j)))
print("<<<<<<<<<<<<<<<<<<<<<< " + str(len(pairList)))
runParallelCode(pairList)
if __name__ == '__main__':
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
| # import the GED using the munkres algorithm
import gmatch4py as gm
import networkx as nx
import collections
import csv
import pickle
from collections import OrderedDict
import json
import concurrent.futures as cf
import time
iter = 0
def getFinishedStatus():
    global iter  # needed so the module-level counter above can be updated
    iter += 1
    print('*******\t' + str(iter) + "\t*******")
def getGraphDiff(files):
dotFile_data_path = './DotFiles/'
file1 = files.split(',')[0]
file2 = files.split(',')[1]
g1_name = file1.split('.')[0] # gets the name of first dotFile without its extension
g2_name = file2.split('.')[0] # gets the name of second dotFile without its extension
#print("\n Started pair: "+ str(g1_name) + ', ' + str(g2_name))
graph_1 = nx.drawing.nx_pydot.read_dot(str(dotFile_data_path) + str(file1))
graph_2 = nx.drawing.nx_pydot.read_dot(str(dotFile_data_path) + str(file2))
jsonData = getJsonData(graph_1, graph_2)
dumpJson(jsonData, g1_name, g2_name)
#print("\n >>>Finished pair: "+ str(g1_name) + ', ' + str(g2_name))
#getFinishedStatus()
#print('Total time : '+str(totalTime)+ '\n')
'''
def runParallelCode(pairList):
with cf.ProcessPoolExecutor(max_workers =2) as executor:
try:
for future in cf.as_completed((executor.map(getGraphDiff, pairList, timeout=5000000)), timeout=5000000):
print(str(type(future.result())))
if str(type(future.result())) == "<class 'NoneType'>":
pass
else:
print(future.result(timeout=5000000))
except cf._base.TimeoutError:
print("Time limit exceeded")
pass
'''
def runParallelCode(pairList):
with cf.ProcessPoolExecutor(max_workers =2) as executor:
try:
result = executor.map(getGraphDiff, pairList, timeout=5000000)
for r in result:
if str(type(r)) == "<class 'NoneType'>":
pass
else:
print(r)
except cf._base.TimeoutError:
print("Time limit exceeded")
pass
def getJsonData(graph_1,graph_2):
g1_edgeList = []
g2_edgeList = []
# convert the node labels which are strings to sorted integers without affecting the node attributes.
sortedIntGraph_1 = nx.relabel.convert_node_labels_to_integers(graph_1, first_label=0, ordering='sorted', label_attribute=None)
sortedIntGraph_2 = nx.relabel.convert_node_labels_to_integers(graph_2, first_label=0, ordering='sorted', label_attribute=None)
g1_edgeTuple = list(sortedIntGraph_1.edges(data=False))
g2_edgeTuple = list(sortedIntGraph_2.edges(data=False))
# get graph edge lists
for i in g1_edgeTuple:
g1_edgeList.append(list(i))
for i in g2_edgeTuple:
g2_edgeList.append(list(i))
# get graph attributes in the ascending order as the node labels
nodeLabelList_g1 = []
nodeLabelList_g2 = []
nodeList_g1 = list(sortedIntGraph_1.nodes(data=True))
nodeList_g2 = list(sortedIntGraph_2.nodes(data=True))
for i in range(len(nodeList_g1)):
if nodeList_g1[i][0] == i:
nodeLabelList_g1.insert(i, nodeList_g1[i][1].get('label').replace('"', ''))
for i in range(len(nodeList_g2)):
if nodeList_g2[i][0] == i:
nodeLabelList_g2.insert(i, nodeList_g2[i][1].get('label').replace('"', ''))
# get graph edit distance
#ged = nx.graph_edit_distance(sortedIntGraph_1, sortedIntGraph_2, node_match=return_eq) Commented since its too time expensive
#Gmatch4py code for calculating ged
#abs_ged = gm.BP_2(1,1,1,1)
ged=gm.GraphEditDistance(1,1,1,1) # all edit costs are equal to 1
#hed = gm.HED(1,1,1,1)
result = ged.compare([sortedIntGraph_1, sortedIntGraph_2], None)
# generate the json files
jsonDict = {}
jsonDict["graph_1"] = g1_edgeList
jsonDict["graph_2"] = g2_edgeList
jsonDict["labels_1"] = nodeLabelList_g1
jsonDict["labels_2"] = nodeLabelList_g2
jsonDict["ged"] = int(result[0][1])
#print(jsonDict)
return jsonDict
def return_eq(node1, node2): #function to compare the node labels
return node1['label']==node2['label']
def dumpJson(jsonFile, g1, g2): #function to dump the Json files
outPath = './outFiles/'
with open(str(outPath)+ str(g1) + '::::'+ str(g2) + '.json', 'w') as fp:
json.dump(jsonFile, fp)
def main(): #main function from where the program starts
dotFileList= []
#dotFile_data_path = './DotFiles/test'
with open('./filenames.txt', 'r') as csvFile:
reader = csv.reader(csvFile)
for row in reader:
dotName = str(row).replace('[', '').replace(']','').replace("'","").strip()
dotFileList.append(dotName)
print("Total number of graph files: " + str(len(dotFileList)))
counter = 0
len_dotFileList = len(dotFileList)
totalGraphJsons = len_dotFileList * len_dotFileList #total number of graph similarity json samples
print("Total Graph Similarity json samples: " + str(int(totalGraphJsons)))
pairList = []
#Code for generating graph Similarity json. Takes a non-symmetric pair of graphs from a list and returns their json data
for dotFile_i in dotFileList:
for dotFile_j in dotFileList:
pairList.append(str(dotFile_i + ','+ str(dotFile_j)))
print("<<<<<<<<<<<<<<<<<<<<<< " + str(len(pairList)))
runParallelCode(pairList)
if __name__ == '__main__':
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time)) | en | 0.636795 | # import the GED using the munkres algorithm # gets the name of first dotFile without its extension # gets the name of second dotFile without its extension #print("\n Started pair: "+ str(g1_name) + ', ' + str(g2_name)) #print("\n >>>Finished pair: "+ str(g1_name) + ', ' + str(g2_name)) #getFinishedStatus() #print('Total time : '+str(totalTime)+ '\n') def runParallelCode(pairList): with cf.ProcessPoolExecutor(max_workers =2) as executor: try: for future in cf.as_completed((executor.map(getGraphDiff, pairList, timeout=5000000)), timeout=5000000): print(str(type(future.result()))) if str(type(future.result())) == "<class 'NoneType'>": pass else: print(future.result(timeout=5000000)) except cf._base.TimeoutError: print("Time limit exceeded") pass # convert the node labels which are strings to sorted integers without affecting the node attributes. # get graph edge lists # get graph attributes in the ascending order as the node labels # get graph edit distance #ged = nx.graph_edit_distance(sortedIntGraph_1, sortedIntGraph_2, node_match=return_eq) Commented since its too time expensive #Gmatch4py code for calculating ged #abs_ged = gm.BP_2(1,1,1,1) # all edit costs are equal to 1 #hed = gm.HED(1,1,1,1) # generate the json files #print(jsonDict) #function to compare the node labels #function to dump the Json files #main function from where the program starts #dotFile_data_path = './DotFiles/test' #total number of graph similarity json samples #Code for generating graph Similarity json. Takes a non-symmetric pair of graphs from a list and returns their json data | 2.166478 | 2 |
src/blockdiag/utils/rst/nodes.py | Dridi/blockdiag | 0 | 9437 | <reponame>Dridi/blockdiag<filename>src/blockdiag/utils/rst/nodes.py
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from hashlib import sha1
from docutils import nodes
import blockdiag.parser
import blockdiag.builder
import blockdiag.drawer
class blockdiag(nodes.General, nodes.Element):
name = 'blockdiag'
processor = blockdiag
def to_diagram(self):
try:
tree = self.processor.parser.parse_string(self['code'])
except:
code = '%s { %s }' % (self.name, self['code'])
tree = self.processor.parser.parse_string(code)
self['code'] = code # replace if succeeded
return self.processor.builder.ScreenNodeBuilder.build(tree)
def to_drawer(self, image_format, filename, fontmap, **kwargs):
diagram = self.to_diagram()
return self.processor.drawer.DiagramDraw(image_format, diagram,
filename, fontmap=fontmap,
**kwargs)
def get_path(self, **options):
options.update(self['options'])
hashseed = (self['code'] + str(options)).encode('utf-8')
hashed = sha1(hashseed).hexdigest()
filename = "%s-%s.%s" % (self.name, hashed, options['format'].lower())
outputdir = options.get('outputdir')
if outputdir:
filename = os.path.join(outputdir, filename)
return filename
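# --- Added illustrative sketch; not part of the original module ---
# Hypothetical use of the node class above: the diagram source and option
# values below are made-up examples, shown only to illustrate how get_path()
# derives a content-addressed output filename.
def _example_node_path():
    node = blockdiag()
    node['code'] = 'blockdiag { A -> B }'
    node['options'] = {}
    return node.get_path(format='PNG', outputdir='/tmp')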
| # -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from hashlib import sha1
from docutils import nodes
import blockdiag.parser
import blockdiag.builder
import blockdiag.drawer
class blockdiag(nodes.General, nodes.Element):
name = 'blockdiag'
processor = blockdiag
def to_diagram(self):
try:
tree = self.processor.parser.parse_string(self['code'])
except:
code = '%s { %s }' % (self.name, self['code'])
tree = self.processor.parser.parse_string(code)
self['code'] = code # replace if succeeded
return self.processor.builder.ScreenNodeBuilder.build(tree)
def to_drawer(self, image_format, filename, fontmap, **kwargs):
diagram = self.to_diagram()
return self.processor.drawer.DiagramDraw(image_format, diagram,
filename, fontmap=fontmap,
**kwargs)
def get_path(self, **options):
options.update(self['options'])
hashseed = (self['code'] + str(options)).encode('utf-8')
hashed = sha1(hashseed).hexdigest()
filename = "%s-%s.%s" % (self.name, hashed, options['format'].lower())
outputdir = options.get('outputdir')
if outputdir:
filename = os.path.join(outputdir, filename)
return filename | en | 0.836079 | # -*- coding: utf-8 -*- # Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # replace if succeeded | 2.254815 | 2 |
python-advanced/chp1/main.py | emiliachojak/bio-projects | 2 | 9438 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 20:00:00 2019
@author: <NAME>
@e-mail: <EMAIL>
"""
tax_dict = {
'Pan troglodytes' : 'Hominoidea', 'Pongo abelii' : 'Hominoidea',
'Hominoidea' : 'Simiiformes', 'Simiiformes' : 'Haplorrhini',
'Tarsius tarsier' : 'Tarsiiformes', 'Haplorrhini' : 'Primates',
'Tarsiiformes' : 'Haplorrhini', 'Loris tardigradus' :
'Lorisidae',
'Lorisidae' : 'Strepsirrhini', 'Strepsirrhini' : 'Primates',
'Allocebus trichotis' : 'Lemuriformes', 'Lemuriformes' :
'Strepsirrhini',
'Galago alleni' : 'Lorisiformes', 'Lorisiformes' :
'Strepsirrhini',
'Galago moholi' : 'Lorisiformes'
}
def find_ancestors(taxon):
if taxon == 'Primates':
return [taxon]
parent = tax_dict[taxon]
parent_ancestors = find_ancestors(parent)
return [taxon] + parent_ancestors
def find_ancestors_for_many(taxon_list):
many_parents = []
for taxon in taxon_list:
many_parents.append(find_ancestors(taxon))
return many_parents
def last_common_ancestor(many_parents):
for parent in many_parents[0]:
is_ok = True
for parent_list in many_parents:
if parent not in parent_list:
is_ok = False
if is_ok == True:
return parent
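# --- Added illustrative sketch; not part of the original script ---
# A small check of the helpers above; the expected lineage follows directly
# from tax_dict, so this only illustrates how the pieces fit together.
def example_lineage():
    lineage = find_ancestors("Pan troglodytes")
    # ['Pan troglodytes', 'Hominoidea', 'Simiiformes', 'Haplorrhini', 'Primates']
    return lineage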
print(last_common_ancestor(find_ancestors_for_many(["Galago alleni", "Galago moholi"]))) | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 20:00:00 2019
@author: <NAME>
@e-mail: <EMAIL>
"""
tax_dict = {
'Pan troglodytes' : 'Hominoidea', 'Pongo abelii' : 'Hominoidea',
'Hominoidea' : 'Simiiformes', 'Simiiformes' : 'Haplorrhini',
'Tarsius tarsier' : 'Tarsiiformes', 'Haplorrhini' : 'Primates',
'Tarsiiformes' : 'Haplorrhini', 'Loris tardigradus' :
'Lorisidae',
'Lorisidae' : 'Strepsirrhini', 'Strepsirrhini' : 'Primates',
'Allocebus trichotis' : 'Lemuriformes', 'Lemuriformes' :
'Strepsirrhini',
'Galago alleni' : 'Lorisiformes', 'Lorisiformes' :
'Strepsirrhini',
'Galago moholi' : 'Lorisiformes'
}
def find_ancestors(taxon):
if taxon == 'Primates':
return [taxon]
parent = tax_dict[taxon]
parent_ancestors = find_ancestors(parent)
return [taxon] + parent_ancestors
def find_ancestors_for_many(taxon_list):
many_parents = []
for taxon in taxon_list:
many_parents.append(find_ancestors(taxon))
return many_parents
def last_common_ancestor(many_parents):
for parent in many_parents[0]:
is_ok = True
for parent_list in many_parents:
if parent not in parent_list:
is_ok = False
if is_ok == True:
return parent
print(last_common_ancestor(find_ancestors_for_many(["Galago alleni", "Galago moholi"]))) | en | 0.499979 | # -*- coding: utf-8 -*- Created on Thu Dec 19 20:00:00 2019 @author: <NAME> @e-mail: <EMAIL> | 2.884366 | 3 |
Python/csv/1.py | LeishenKOBE/good-good-study | 0 | 9439 | <filename>Python/csv/1.py<gh_stars>0
import csv
# with open('./1.csv', newline='', encoding='utf-8') as f:
# reader = csv.reader(f)
# for row in reader:
# print(row)
with open('./1.csv', 'a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(['4', '猫砂', '25', '1022', '886'])
writer.writerow(['5', '猫罐头', '18', '2234', '3121'])
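# --- Added illustrative sketch; not part of the original file ---
# Read the rows back to confirm the appended lines were written, mirroring the
# commented-out reader block at the top of the file.
with open('./1.csv', newline='', encoding='utf-8') as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)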
| <filename>Python/csv/1.py<gh_stars>0
import csv
# with open('./1.csv', newline='', encoding='utf-8') as f:
# reader = csv.reader(f)
# for row in reader:
# print(row)
with open('./1.csv', 'a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(['4', '猫砂', '25', '1022', '886'])
writer.writerow(['5', '猫罐头', '18', '2234', '3121'])
| en | 0.77788 | # with open('./1.csv', newline='', encoding='utf-8') as f: # reader = csv.reader(f) # for row in reader: # print(row) | 3.252731 | 3 |
src/solana/rpc/responses.py | broper2/solana-py | 1 | 9440 | """This module contains code for parsing RPC responses."""
from dataclasses import dataclass, field
from typing import Union, Tuple, Any, Dict, List, Optional, Literal
from apischema import alias
from apischema.conversions import as_str
from solana.publickey import PublicKey
from solana.transaction import TransactionSignature
as_str(PublicKey)
TransactionErrorResult = Optional[dict]
@dataclass
class TransactionErr:
"""Container for possible transaction errors."""
err: TransactionErrorResult
@dataclass
class Context:
"""RPC result context."""
slot: int
@dataclass
class WithContext:
"""Base class for RPC result including context."""
context: Context
@dataclass
class AccountInfo:
"""Account information."""
lamports: int
owner: PublicKey
data: Union[Literal[""], Tuple[str, str], Dict[str, Any]]
executable: bool
rent_epoch: int = field(metadata=alias("rentEpoch"))
@dataclass
class AccountInfoAndContext(WithContext):
"""Account info and RPC result context."""
value: AccountInfo
@dataclass
class SubscriptionNotificationBase:
"""Base class for RPC subscription notifications."""
subscription: int
result: Any
@dataclass
class AccountNotification(SubscriptionNotificationBase):
"""Account subscription notification."""
result: AccountInfoAndContext
@dataclass
class LogItem(TransactionErr):
"""Container for logs from logSubscribe."""
signature: TransactionSignature
logs: Optional[List[str]]
@dataclass
class LogItemAndContext(WithContext):
"""Log item with RPC result context."""
value: LogItem
@dataclass
class LogsNotification(SubscriptionNotificationBase):
"""Logs subscription notification."""
result: LogItemAndContext
@dataclass
class ProgramAccount:
"""Program account pubkey and account info."""
pubkey: PublicKey
account: AccountInfo
@dataclass
class ProgramAccountAndContext(WithContext):
"""Program subscription data with RPC result context."""
value: ProgramAccount
@dataclass
class ProgramNotification(SubscriptionNotificationBase):
"""Program subscription notification."""
result: ProgramAccountAndContext
@dataclass
class SignatureErrAndContext(WithContext):
"""Signature subscription error info with RPC result context."""
value: TransactionErr
@dataclass
class SignatureNotification(SubscriptionNotificationBase):
"""Signature subscription notification."""
result: SignatureErrAndContext
@dataclass
class SlotBase:
"""Base class for slot container."""
slot: int
@dataclass
class SlotInfo(SlotBase):
"""Slot info."""
parent: int
root: int
@dataclass
class SlotNotification(SubscriptionNotificationBase):
"""Slot subscription notification."""
result: SlotInfo
@dataclass
class RootNotification(SubscriptionNotificationBase):
"""Root subscription notification."""
result: int
@dataclass
class SlotAndTimestampBase(SlotBase):
"""Base class for a slot with timestamp."""
timestamp: int
@dataclass
class FirstShredReceived(SlotAndTimestampBase):
"""First shread received update."""
type: Literal["firstShredReceived"]
@dataclass
class Completed(SlotAndTimestampBase):
"""Slot completed update."""
type: Literal["completed"]
@dataclass
class CreatedBank(SlotAndTimestampBase):
"""Created bank update."""
parent: int
type: Literal["createdBank"]
@dataclass
class SlotTransactionStats:
"""Slot transaction stats."""
num_transaction_entries: int = field(metadata=alias("numTransactionEntries"))
num_successful_transactions: int = field(metadata=alias("numSuccessfulTransactions"))
num_failed_transactions: int = field(metadata=alias("numFailedTransactions"))
max_transactions_per_entry: int = field(metadata=alias("maxTransactionsPerEntry"))
@dataclass
class Frozen(SlotAndTimestampBase):
"""Slot frozen update."""
stats: SlotTransactionStats
type: Literal["frozen"]
@dataclass
class Dead(SlotAndTimestampBase):
"""Dead slot update."""
err: str
type: Literal["dead"]
@dataclass
class OptimisticConfirmation(SlotAndTimestampBase):
"""Optimistic confirmation update."""
type: Literal["optimisticConfirmation"]
@dataclass
class Root(SlotAndTimestampBase):
"""Root update."""
type: Literal["root"]
SlotsUpdatesItem = Union[FirstShredReceived, Completed, CreatedBank, Frozen, Dead, OptimisticConfirmation, Root]
@dataclass
class SlotsUpdatesNotification(SubscriptionNotificationBase):
"""Slots updates notification."""
result: SlotsUpdatesItem
@dataclass
class VoteItem:
"""Vote data."""
hash: str
slots: List[int]
timestamp: Optional[int]
@dataclass
class VoteNotification(SubscriptionNotificationBase):
"""Vote update notification."""
result: VoteItem
SubscriptionNotification = Union[
AccountNotification,
LogsNotification,
ProgramNotification,
SignatureNotification,
SlotNotification,
RootNotification,
SlotsUpdatesNotification,
VoteNotification,
]
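# --- Added illustrative sketch; not part of the original module ---
# Rough example of building one of the dataclasses above from a parsed RPC
# payload with apischema's deserialize; the payload values are made up.
def _example_slot_notification() -> SlotNotification:
    from apischema import deserialize
    payload = {"subscription": 1, "result": {"slot": 100, "parent": 99, "root": 90}}
    return deserialize(SlotNotification, payload)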
| """This module contains code for parsing RPC responses."""
from dataclasses import dataclass, field
from typing import Union, Tuple, Any, Dict, List, Optional, Literal
from apischema import alias
from apischema.conversions import as_str
from solana.publickey import PublicKey
from solana.transaction import TransactionSignature
as_str(PublicKey)
TransactionErrorResult = Optional[dict]
@dataclass
class TransactionErr:
"""Container for possible transaction errors."""
err: TransactionErrorResult
@dataclass
class Context:
"""RPC result context."""
slot: int
@dataclass
class WithContext:
"""Base class for RPC result including context."""
context: Context
@dataclass
class AccountInfo:
"""Account information."""
lamports: int
owner: PublicKey
data: Union[Literal[""], Tuple[str, str], Dict[str, Any]]
executable: bool
rent_epoch: int = field(metadata=alias("rentEpoch"))
@dataclass
class AccountInfoAndContext(WithContext):
"""Account info and RPC result context."""
value: AccountInfo
@dataclass
class SubscriptionNotificationBase:
"""Base class for RPC subscription notifications."""
subscription: int
result: Any
@dataclass
class AccountNotification(SubscriptionNotificationBase):
"""Account subscription notification."""
result: AccountInfoAndContext
@dataclass
class LogItem(TransactionErr):
"""Container for logs from logSubscribe."""
signature: TransactionSignature
logs: Optional[List[str]]
@dataclass
class LogItemAndContext(WithContext):
"""Log item with RPC result context."""
value: LogItem
@dataclass
class LogsNotification(SubscriptionNotificationBase):
"""Logs subscription notification."""
result: LogItemAndContext
@dataclass
class ProgramAccount:
"""Program account pubkey and account info."""
pubkey: PublicKey
account: AccountInfo
@dataclass
class ProgramAccountAndContext(WithContext):
"""Program subscription data with RPC result context."""
value: ProgramAccount
@dataclass
class ProgramNotification(SubscriptionNotificationBase):
"""Program subscription notification."""
result: ProgramAccountAndContext
@dataclass
class SignatureErrAndContext(WithContext):
"""Signature subscription error info with RPC result context."""
value: TransactionErr
@dataclass
class SignatureNotification(SubscriptionNotificationBase):
"""Signature subscription notification."""
result: SignatureErrAndContext
@dataclass
class SlotBase:
"""Base class for slot container."""
slot: int
@dataclass
class SlotInfo(SlotBase):
"""Slot info."""
parent: int
root: int
@dataclass
class SlotNotification(SubscriptionNotificationBase):
"""Slot subscription notification."""
result: SlotInfo
@dataclass
class RootNotification(SubscriptionNotificationBase):
"""Root subscription notification."""
result: int
@dataclass
class SlotAndTimestampBase(SlotBase):
"""Base class for a slot with timestamp."""
timestamp: int
@dataclass
class FirstShredReceived(SlotAndTimestampBase):
"""First shread received update."""
type: Literal["firstShredReceived"]
@dataclass
class Completed(SlotAndTimestampBase):
"""Slot completed update."""
type: Literal["completed"]
@dataclass
class CreatedBank(SlotAndTimestampBase):
"""Created bank update."""
parent: int
type: Literal["createdBank"]
@dataclass
class SlotTransactionStats:
"""Slot transaction stats."""
num_transaction_entries: int = field(metadata=alias("numTransactionEntries"))
num_successful_transactions: int = field(metadata=alias("numSuccessfulTransactions"))
num_failed_transactions: int = field(metadata=alias("numFailedTransactions"))
max_transactions_per_entry: int = field(metadata=alias("maxTransactionsPerEntry"))
@dataclass
class Frozen(SlotAndTimestampBase):
"""Slot frozen update."""
stats: SlotTransactionStats
type: Literal["frozen"]
@dataclass
class Dead(SlotAndTimestampBase):
"""Dead slot update."""
err: str
type: Literal["dead"]
@dataclass
class OptimisticConfirmation(SlotAndTimestampBase):
"""Optimistic confirmation update."""
type: Literal["optimisticConfirmation"]
@dataclass
class Root(SlotAndTimestampBase):
"""Root update."""
type: Literal["root"]
SlotsUpdatesItem = Union[FirstShredReceived, Completed, CreatedBank, Frozen, Dead, OptimisticConfirmation, Root]
@dataclass
class SlotsUpdatesNotification(SubscriptionNotificationBase):
"""Slots updates notification."""
result: SlotsUpdatesItem
@dataclass
class VoteItem:
"""Vote data."""
hash: str
slots: List[int]
timestamp: Optional[int]
@dataclass
class VoteNotification(SubscriptionNotificationBase):
"""Vote update notification."""
result: VoteItem
SubscriptionNotification = Union[
AccountNotification,
LogsNotification,
ProgramNotification,
SignatureNotification,
SlotNotification,
RootNotification,
SlotsUpdatesNotification,
VoteNotification,
]
| en | 0.646626 | This module contains code for parsing RPC responses. Container for possible transaction errors. RPC result context. Base class for RPC result including context. Account information. Account info and RPC result context. Base class for RPC subscription notifications. Account subscription notification. Container for logs from logSubscribe. Log item with RPC result context. Logs subscription notification. Program account pubkey and account info. Program subscription data with RPC result context. Program subscription notification. Signature subscription error info with RPC result context. Signature subscription notification. Base class for slot container. Slot info. Slot subscription notification. Root subscription notification. Base class for a slot with timestamp. First shread received update. Slot completed update. Created bank update. Slot transaction stats. Slot frozen update. Dead slot update. Optimistic confirmation update. Root update. Slots updates notification. Vote data. Vote update notification. | 2.321556 | 2 |
python/data_structures/binheap.py | adriennekarnoski/data-structures | 1 | 9441 | <gh_stars>1-10
"""Build a binary min heap object."""
from math import floor
class BinaryHeap(object):
"""Create a Binary Heap object as a Min Heap."""
def __init__(self):
"""Initialize the heap list to be used by Binary Heap."""
self._heap_list = []
def push(self, val):
"""Add new value to heap list and run check heap method."""
self._heap_list.append(val)
if len(self._heap_list) == 2:
self._small_heap()
self._check_heap()
def _small_heap(self):
heap = self._heap_list
if heap[0] > heap[1]:
heap[0], heap[1] = heap[1], heap[0]
return heap
def _check_heap(self):
"""Check all the children are less than their parents."""
heap = self._heap_list
index = floor((len(heap) - 1) / 2)
i = 0
while i < index:
l = (2 * i) + 1
if heap[i] > heap[l]:
heap[i], heap[l] = heap[l], heap[i]
try:
r = (2 * i) + 2
if heap[i] > heap[r]:
heap[i], heap[r] = heap[r], heap[i]
except IndexError: # pragma: no cover
pass
i += 1
return heap
def pop(self):
"""Remove top value of heap and run check heap method."""
try:
heap = self._heap_list
index = len(heap) - 1
heap[0], heap[index] = heap[index], heap[0]
self._heap_list.pop()
if len(self._heap_list) == 2:
self._small_heap()
self._check_heap()
return heap
except IndexError:
raise IndexError('Nothing available to pop')
def _display(self): # pragma: no cover
"""Make it easier during testing."""
for item in self._heap_list:
print(item)
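# --- Added illustrative sketch; not part of the original module ---
# Small usage example of the heap above; the pushed values are arbitrary.
def _example_usage():
    heap = BinaryHeap()
    for value in (5, 3, 8, 1):
        heap.push(value)
    heap.pop()  # removes the value currently at the top of the heap
    return heap._heap_list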
| """Build a binary min heap object."""
from math import floor
class BinaryHeap(object):
"""Create a Binary Heap object as a Min Heap."""
def __init__(self):
"""Initialize the heap list to be used by Binary Heap."""
self._heap_list = []
def push(self, val):
"""Add new value to heap list and run check heap method."""
self._heap_list.append(val)
if len(self._heap_list) == 2:
self._small_heap()
self._check_heap()
def _small_heap(self):
heap = self._heap_list
if heap[0] > heap[1]:
heap[0], heap[1] = heap[1], heap[0]
return heap
def _check_heap(self):
"""Check all the children are less than their parents."""
heap = self._heap_list
index = floor((len(heap) - 1) / 2)
i = 0
while i < index:
l = (2 * i) + 1
if heap[i] > heap[l]:
heap[i], heap[l] = heap[l], heap[i]
try:
r = (2 * i) + 2
if heap[i] > heap[r]:
heap[i], heap[r] = heap[r], heap[i]
except IndexError: # pragma: no cover
pass
i += 1
return heap
def pop(self):
"""Remove top value of heap and run check heap method."""
try:
heap = self._heap_list
index = len(heap) - 1
heap[0], heap[index] = heap[index], heap[0]
self._heap_list.pop()
if len(self._heap_list) == 2:
self._small_heap()
self._check_heap()
return heap
except IndexError:
raise IndexError('Nothing available to pop')
def _display(self): # pragma: no cover
"""Make it easier during testing."""
for item in self._heap_list:
print(item) | en | 0.876177 | Build a binary min heap object. Create a Binary Heap object as a Min Heap. Initialize the heap list to be used by Binary Heap. Add new value to heap list and run check heap method. Check all the children are less than their parents. # pragma: no cover Remove top value of heap and run check heap method. # pragma: no cover Make it easier during testing. | 4.12915 | 4 |
vesper/archive_settings.py | RichardLitt/Vesper | 29 | 9442 | """
Vesper archive settings.
The Vesper server serves the Vesper archive that is in the directory
in which the server starts. The archive settings are the composition
of a set of default settings (hard-coded in this module) and settings
(optionally) specified in the file "Archive Settings.yaml" in the
archive directory.
"""
from pathlib import Path
import os
import sys
from vesper.util.settings import Settings
from vesper.util.settings_type import SettingsType
import vesper.archive_paths as archive_paths
_DEFAULT_SETTINGS = Settings.create_from_yaml('''
database:
engine: SQLite
''')
_SETTINGS_TYPE = SettingsType('Archive Settings', _DEFAULT_SETTINGS)
_SETTINGS_FILE_NAME = 'Archive Settings.yaml'
def _create_settings():
archive_dir_path = Path(os.getcwd())
settings = _load_settings_file(archive_dir_path)
archive_paths.initialize(archive_dir_path, settings)
return settings
def _load_settings_file(archive_dir_path):
file_path = archive_dir_path / _SETTINGS_FILE_NAME
if not file_path.exists():
        # settings file does not exist
return _SETTINGS_TYPE.defaults
else:
# settings file exists
try:
return _SETTINGS_TYPE.create_settings_from_yaml_file(file_path)
except Exception as e:
print((
'Load failed for settings file "{}". Error message '
'was: {}').format(file_path, str(e)))
sys.exit(1)
archive_settings = _create_settings()
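# --- Added illustrative sketch; not part of the original module ---
# An "Archive Settings.yaml" placed in the archive directory is merged over the
# defaults above; a file that simply restates the default engine would look
# like the string below (shown here only as an illustration).
_EXAMPLE_ARCHIVE_SETTINGS_YAML = '''
database:
    engine: SQLite
'''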
| """
Vesper archive settings.
The Vesper server serves the Vesper archive that is in the directory
in which the server starts. The archive settings are the composition
of a set of default settings (hard-coded in this module) and settings
(optionally) specified in the file "Archive Settings.yaml" in the
archive directory.
"""
from pathlib import Path
import os
import sys
from vesper.util.settings import Settings
from vesper.util.settings_type import SettingsType
import vesper.archive_paths as archive_paths
_DEFAULT_SETTINGS = Settings.create_from_yaml('''
database:
engine: SQLite
''')
_SETTINGS_TYPE = SettingsType('Archive Settings', _DEFAULT_SETTINGS)
_SETTINGS_FILE_NAME = 'Archive Settings.yaml'
def _create_settings():
archive_dir_path = Path(os.getcwd())
settings = _load_settings_file(archive_dir_path)
archive_paths.initialize(archive_dir_path, settings)
return settings
def _load_settings_file(archive_dir_path):
file_path = archive_dir_path / _SETTINGS_FILE_NAME
if not file_path.exists():
        # settings file does not exist
return _SETTINGS_TYPE.defaults
else:
# settings file exists
try:
return _SETTINGS_TYPE.create_settings_from_yaml_file(file_path)
except Exception as e:
print((
'Load failed for settings file "{}". Error message '
'was: {}').format(file_path, str(e)))
sys.exit(1)
archive_settings = _create_settings()
| en | 0.839428 | Vesper archive settings. The Vesper server serves the Vesper archive that is in the directory in which the server starts. The archive settings are the composition of a set of default settings (hard-coded in this module) and settings (optionally) specified in the file "Archive Settings.yaml" in the archive directory. database: engine: SQLite # settings file doex not exist # settings file exists | 2.557775 | 3 |
autotf/model/vgg16.py | DAIM-ML/autotf | 8 | 9443 | <filename>autotf/model/vgg16.py<gh_stars>1-10
#-*- coding=utf-8 -*-
from __future__ import division, print_function, absolute_import
from base_model import BaseModel
from helper import *
import tensorflow as tf
import pickle
import numpy as np
import time
class Vgg16(BaseModel):
default_param = {
"loss" : "square_loss",
"metrics" : ["loss"],
"optimizer" : "sgd",
"learning_rate" : 1e-2,
"batch_size" : 100,
"num_epochs" : 25,
"keep_prob":0.75
}
def __init__(self,classnum):
self.class_num = classnum
self.model = None
self.sess = tf.Session()
self.scope = {}
self.summary = []
def conv2d(self,layer_name,inputs, out_channels, kernel_size, strides=1, padding='SAME'):
in_channels = inputs.get_shape()[-1]
with tf.variable_scope(layer_name) as scope:
self.scope[layer_name] = scope
w = tf.get_variable(name='weights',
trainable=True,
shape=[kernel_size, kernel_size, in_channels, out_channels],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='biases',
trainable=True,
shape=[out_channels],
initializer=tf.constant_initializer(0.0))
inputs = tf.nn.conv2d(inputs, w, [1, strides, strides, 1], padding=padding, name='conv')
inputs = tf.nn.bias_add(inputs, b, name='bias_add')
inputs = tf.nn.relu(inputs, name='relu')
return inputs
def max_pool(self, layer_name, inputs, pool_size, strides, padding='SAME'):
with tf.name_scope(layer_name):
return tf.nn.max_pool(inputs, [1, pool_size, pool_size, 1], [1, strides, strides, 1], padding=padding,
name=layer_name)
def avg_pool(self, layer_name, inputs, pool_size, strides, padding='SAME'):
with tf.name_scope(layer_name):
return tf.nn.avg_pool(inputs, [1, pool_size, pool_size, 1], [1, strides, strides, 1], padding=padding,
name=layer_name)
def lrn(self, layer_name, inputs, depth_radius=5, alpha=0.0001, beta=0.75):
with tf.name_scope(layer_name):
return tf.nn.local_response_normalization(name='pool1_norm1', input=inputs, depth_radius=depth_radius,
alpha=alpha, beta=beta)
def concat(self, layer_name, inputs):
with tf.name_scope(layer_name):
one_by_one = inputs[0]
three_by_three = inputs[1]
five_by_five = inputs[2]
pooling = inputs[3]
return tf.concat([one_by_one, three_by_three, five_by_five, pooling], axis=3)
def dropout(self, layer_name, inputs, keep_prob):
# dropout_rate = 1 - keep_prob
with tf.name_scope(layer_name):
return tf.nn.dropout(name=layer_name, x=inputs, keep_prob=keep_prob)
def bn(self, layer_name, inputs, epsilon=1e-3):
with tf.name_scope(layer_name):
batch_mean, batch_var = tf.nn.moments(inputs, [0])
inputs = tf.nn.batch_normalization(inputs, mean=batch_mean, variance=batch_var, offset=None,
scale=None, variance_epsilon=epsilon)
return inputs
def fc(self, layer_name, inputs, out_nodes):
shape = inputs.get_shape()
if len(shape) == 4: # x is 4D tensor
size = shape[1].value * shape[2].value * shape[3].value
else: # x has already flattened
size = shape[-1].value
with tf.variable_scope(layer_name) as scope:
self.scope[layer_name] = scope
w = tf.get_variable('weights',
shape=[size, out_nodes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('biases',
shape=[out_nodes],
initializer=tf.constant_initializer(0.0))
flat_x = tf.reshape(inputs, [-1, size])
inputs = tf.nn.bias_add(tf.matmul(flat_x, w), b)
inputs = tf.nn.relu(inputs)
return inputs
def build_model(self):
        # training data (input images)
self.inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
        # training label data (one-hot)
self.labels = tf.placeholder(tf.float32, shape=[None, self.class_num])
# dropout
self.keep_prob = tf.placeholder(tf.float32)
self.conv1_1 = self.conv2d("conv1_1",self.inputs,64,3)
self.conv1_2 = self.conv2d("conv1_2",self.conv1_1, 64,3)
self.pool1 = self.max_pool('pool1',self.conv1_2,pool_size=2,strides=2)
#112*112*64
self.conv2_1 = self.conv2d("conv2_1",self.pool1, 128,3)
self.conv2_2 = self.conv2d( "conv2_2",self.conv2_1, 128,3)
self.pool2 = self.max_pool("pool2",self.conv2_2,pool_size=2,strides=2)
#56*56*128
self.conv3_1 = self.conv2d("conv3_1",self.pool2, 256,3)
self.conv3_2 = self.conv2d("conv3_2",self.conv3_1, 256,3)
self.conv3_3 = self.conv2d("conv3_3",self.conv3_2, 256, 3)
self.pool3 = self.max_pool("pool3",self.conv3_3,pool_size=2,strides=2)
#28*28*256
self.conv4_1 = self.conv2d("conv4_1",self.pool3, 512, 3)
self.conv4_2 = self.conv2d("conv4_2",self.conv4_1, 512, 3)
self.conv4_3 = self.conv2d("conv4_3",self.conv4_2, 512, 3)
self.pool4 = self.max_pool("pool4",self.conv4_3, pool_size=2,strides=2)
#14*14*512
self.conv5_1 = self.conv2d("conv5_1",self.pool4, 512, 3)
self.conv5_2 = self.conv2d("conv5_2",self.conv5_1, 512, 3)
self.conv5_3 = self.conv2d("conv5_3",self.conv5_2, 512, 3)
self.pool5 = self.max_pool( 'pool5',self.conv5_3,pool_size=2,strides=2)
#7*7*512
self.fc6 = self.fc("fc6",self.pool5,4096) # 25088 = 7*7*512
self.relu6 = tf.nn.dropout(self.fc6, self.keep_prob)
self.fc7 = self.fc("fc7",self.relu6,4096)
self.relu7 = tf.nn.dropout(self.fc7, self.keep_prob)
self.pred = self.fc("fc8",self.relu7, self.class_num)
def set_parameter(self, param):
for name in self.default_param:
if name not in param:
param[name] = self.default_param[name]
self.build_model()
        # define the cross-entropy loss function
self.keep_prob_value = param["keep_prob"]
loss_fun = param["loss"]
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.labels))
optimizer = param["optimizer"]
self.learning_rate = param["learning_rate"]
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
self.correct_prediction = tf.equal(tf.argmax(self.pred, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.batch_size = param["batch_size"]
self.num_epochs = param["num_epochs"]
def get_batch(self, feed_data):
X = feed_data["inputs"]
Y = feed_data["labels"]
totalbatch = int(len(X)/self.batch_size)+1
if (totalbatch * self.batch_size == len(X)):
totalbatch = totalbatch - 1
for i in range(0,totalbatch):
startindex = i*self.batch_size
endindex = (i+1)*self.batch_size
batch_xs = X[startindex:endindex]
batch_ys = Y[startindex:endindex]
yield { "batch_xs" : batch_xs, "batch_ys" : batch_ys }
def train(self, feed_data):
self.sess.run(tf.global_variables_initializer())
trainstep = 0
for epoch in range(self.num_epochs):
avg_cost = 0.0
totalaccuracy = 0.0
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs : batch["batch_xs"],
self.labels : batch["batch_ys"],
self.keep_prob: self.keep_prob_value,
}
_, loss, acc = self.sess.run([self.optimizer, self.loss,self.accuracy], feed_dict=feed_dict)
totalaccuracy += acc*len(batch["batch_xs"])
avg_cost += loss
trainstep = trainstep + 1
totalaccuracy /= len(feed_data['inputs'])
print("train_step"+"\t"+str(trainstep)+"\t"+"epoch:"+"\t"+str(epoch+1)+"\t"+"accuracy:"+"\t"+str(totalaccuracy)+"\t"+"loss:"+"\t"+str(avg_cost))
def model_load(self,path):
saver = tf.train.Saver()
saver.restore(self.sess, path)
return
def model_save(self,path):
saver = tf.train.Saver()
saver.save(self.sess, path)
return
def evaluate(self, feed_data):
avg_loss = 0.0
totalaccuracy = 0.0
totallen = len(feed_data["inputs"])
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs: batch["batch_xs"],
self.labels: batch["batch_ys"],
self.keep_prob:self.keep_prob_value
}
loss, acc = self.sess.run([self.loss, self.accuracy], feed_dict=feed_dict)
totalaccuracy += acc * len(batch["batch_xs"])
avg_loss += loss
avg_loss /= totallen
totalaccuracy /= len(feed_data['inputs'])
res = {"accuracy":totalaccuracy,"loss":avg_loss}
return res
def predict(self, feed_data):
res = []
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs: batch["batch_xs"]
}
pred = self.sess.run(self.pred, feed_dict=feed_dict)
res.extend(pred.tolist())
return res
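# --- Added illustrative sketch; not part of the original module ---
# Rough outline of how the class above is meant to be driven, using tiny
# placeholder arrays. Building the full VGG16 graph is memory-heavy, so this is
# illustration only; the parameter values are arbitrary.
def _example_run():
    model = Vgg16(classnum=10)
    model.set_parameter({"num_epochs": 1, "batch_size": 2})
    inputs = np.zeros((3, 224, 224, 3), dtype=np.float32)
    labels = np.zeros((3, 10), dtype=np.float32)
    model.train({"inputs": inputs, "labels": labels})
    return model.evaluate({"inputs": inputs, "labels": labels})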
| <filename>autotf/model/vgg16.py<gh_stars>1-10
#-*- coding=utf-8 -*-
from __future__ import division, print_function, absolute_import
from base_model import BaseModel
from helper import *
import tensorflow as tf
import pickle
import numpy as np
import time
class Vgg16(BaseModel):
default_param = {
"loss" : "square_loss",
"metrics" : ["loss"],
"optimizer" : "sgd",
"learning_rate" : 1e-2,
"batch_size" : 100,
"num_epochs" : 25,
"keep_prob":0.75
}
def __init__(self,classnum):
self.class_num = classnum
self.model = None
self.sess = tf.Session()
self.scope = {}
self.summary = []
def conv2d(self,layer_name,inputs, out_channels, kernel_size, strides=1, padding='SAME'):
in_channels = inputs.get_shape()[-1]
with tf.variable_scope(layer_name) as scope:
self.scope[layer_name] = scope
w = tf.get_variable(name='weights',
trainable=True,
shape=[kernel_size, kernel_size, in_channels, out_channels],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='biases',
trainable=True,
shape=[out_channels],
initializer=tf.constant_initializer(0.0))
inputs = tf.nn.conv2d(inputs, w, [1, strides, strides, 1], padding=padding, name='conv')
inputs = tf.nn.bias_add(inputs, b, name='bias_add')
inputs = tf.nn.relu(inputs, name='relu')
return inputs
def max_pool(self, layer_name, inputs, pool_size, strides, padding='SAME'):
with tf.name_scope(layer_name):
return tf.nn.max_pool(inputs, [1, pool_size, pool_size, 1], [1, strides, strides, 1], padding=padding,
name=layer_name)
def avg_pool(self, layer_name, inputs, pool_size, strides, padding='SAME'):
with tf.name_scope(layer_name):
return tf.nn.avg_pool(inputs, [1, pool_size, pool_size, 1], [1, strides, strides, 1], padding=padding,
name=layer_name)
def lrn(self, layer_name, inputs, depth_radius=5, alpha=0.0001, beta=0.75):
with tf.name_scope(layer_name):
return tf.nn.local_response_normalization(name='pool1_norm1', input=inputs, depth_radius=depth_radius,
alpha=alpha, beta=beta)
def concat(self, layer_name, inputs):
with tf.name_scope(layer_name):
one_by_one = inputs[0]
three_by_three = inputs[1]
five_by_five = inputs[2]
pooling = inputs[3]
return tf.concat([one_by_one, three_by_three, five_by_five, pooling], axis=3)
def dropout(self, layer_name, inputs, keep_prob):
# dropout_rate = 1 - keep_prob
with tf.name_scope(layer_name):
return tf.nn.dropout(name=layer_name, x=inputs, keep_prob=keep_prob)
def bn(self, layer_name, inputs, epsilon=1e-3):
with tf.name_scope(layer_name):
batch_mean, batch_var = tf.nn.moments(inputs, [0])
inputs = tf.nn.batch_normalization(inputs, mean=batch_mean, variance=batch_var, offset=None,
scale=None, variance_epsilon=epsilon)
return inputs
def fc(self, layer_name, inputs, out_nodes):
shape = inputs.get_shape()
if len(shape) == 4: # x is 4D tensor
size = shape[1].value * shape[2].value * shape[3].value
else: # x has already flattened
size = shape[-1].value
with tf.variable_scope(layer_name) as scope:
self.scope[layer_name] = scope
w = tf.get_variable('weights',
shape=[size, out_nodes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('biases',
shape=[out_nodes],
initializer=tf.constant_initializer(0.0))
flat_x = tf.reshape(inputs, [-1, size])
inputs = tf.nn.bias_add(tf.matmul(flat_x, w), b)
inputs = tf.nn.relu(inputs)
return inputs
def build_model(self):
        # training data (input images)
self.inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
        # training label data (one-hot)
self.labels = tf.placeholder(tf.float32, shape=[None, self.class_num])
# dropout
self.keep_prob = tf.placeholder(tf.float32)
self.conv1_1 = self.conv2d("conv1_1",self.inputs,64,3)
self.conv1_2 = self.conv2d("conv1_2",self.conv1_1, 64,3)
self.pool1 = self.max_pool('pool1',self.conv1_2,pool_size=2,strides=2)
#112*112*64
self.conv2_1 = self.conv2d("conv2_1",self.pool1, 128,3)
self.conv2_2 = self.conv2d( "conv2_2",self.conv2_1, 128,3)
self.pool2 = self.max_pool("pool2",self.conv2_2,pool_size=2,strides=2)
#56*56*128
self.conv3_1 = self.conv2d("conv3_1",self.pool2, 256,3)
self.conv3_2 = self.conv2d("conv3_2",self.conv3_1, 256,3)
self.conv3_3 = self.conv2d("conv3_3",self.conv3_2, 256, 3)
self.pool3 = self.max_pool("pool3",self.conv3_3,pool_size=2,strides=2)
#28*28*256
self.conv4_1 = self.conv2d("conv4_1",self.pool3, 512, 3)
self.conv4_2 = self.conv2d("conv4_2",self.conv4_1, 512, 3)
self.conv4_3 = self.conv2d("conv4_3",self.conv4_2, 512, 3)
self.pool4 = self.max_pool("pool4",self.conv4_3, pool_size=2,strides=2)
#14*14*512
self.conv5_1 = self.conv2d("conv5_1",self.pool4, 512, 3)
self.conv5_2 = self.conv2d("conv5_2",self.conv5_1, 512, 3)
self.conv5_3 = self.conv2d("conv5_3",self.conv5_2, 512, 3)
self.pool5 = self.max_pool( 'pool5',self.conv5_3,pool_size=2,strides=2)
#7*7*512
self.fc6 = self.fc("fc6",self.pool5,4096) # 25088 = 7*7*512
self.relu6 = tf.nn.dropout(self.fc6, self.keep_prob)
self.fc7 = self.fc("fc7",self.relu6,4096)
self.relu7 = tf.nn.dropout(self.fc7, self.keep_prob)
self.pred = self.fc("fc8",self.relu7, self.class_num)
def set_parameter(self, param):
for name in self.default_param:
if name not in param:
param[name] = self.default_param[name]
self.build_model()
        # define the cross-entropy loss function
self.keep_prob_value = param["keep_prob"]
loss_fun = param["loss"]
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.labels))
optimizer = param["optimizer"]
self.learning_rate = param["learning_rate"]
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
self.correct_prediction = tf.equal(tf.argmax(self.pred, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.batch_size = param["batch_size"]
self.num_epochs = param["num_epochs"]
def get_batch(self, feed_data):
X = feed_data["inputs"]
Y = feed_data["labels"]
totalbatch = int(len(X)/self.batch_size)+1
if (totalbatch * self.batch_size == len(X)):
totalbatch = totalbatch - 1
for i in range(0,totalbatch):
startindex = i*self.batch_size
endindex = (i+1)*self.batch_size
batch_xs = X[startindex:endindex]
batch_ys = Y[startindex:endindex]
yield { "batch_xs" : batch_xs, "batch_ys" : batch_ys }
def train(self, feed_data):
self.sess.run(tf.global_variables_initializer())
trainstep = 0
for epoch in range(self.num_epochs):
avg_cost = 0.0
totalaccuracy = 0.0
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs : batch["batch_xs"],
self.labels : batch["batch_ys"],
self.keep_prob: self.keep_prob_value,
}
_, loss, acc = self.sess.run([self.optimizer, self.loss,self.accuracy], feed_dict=feed_dict)
totalaccuracy += acc*len(batch["batch_xs"])
avg_cost += loss
trainstep = trainstep + 1
totalaccuracy /= len(feed_data['inputs'])
print("train_step"+"\t"+str(trainstep)+"\t"+"epoch:"+"\t"+str(epoch+1)+"\t"+"accuracy:"+"\t"+str(totalaccuracy)+"\t"+"loss:"+"\t"+str(avg_cost))
def model_load(self,path):
saver = tf.train.Saver()
saver.restore(self.sess, path)
return
def model_save(self,path):
saver = tf.train.Saver()
saver.save(self.sess, path)
return
def evaluate(self, feed_data):
avg_loss = 0.0
totalaccuracy = 0.0
totallen = len(feed_data["inputs"])
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs: batch["batch_xs"],
self.labels: batch["batch_ys"],
self.keep_prob:self.keep_prob_value
}
loss, acc = self.sess.run([self.loss, self.accuracy], feed_dict=feed_dict)
totalaccuracy += acc * len(batch["batch_xs"])
avg_loss += loss
avg_loss /= totallen
totalaccuracy /= len(feed_data['inputs'])
res = {"accuracy":totalaccuracy,"loss":avg_loss}
return res
def predict(self, feed_data):
res = []
for batch in self.get_batch(feed_data):
feed_dict = {
self.inputs: batch["batch_xs"]
}
pred = self.sess.run(self.pred, feed_dict=feed_dict)
res.extend(pred.tolist())
return res
| en | 0.583058 | #-*- coding=utf-8 -*- # dropout_rate = 1 - keep_prob # x is 4D tensor # x has already flattened # 训练数据 # 训练标签数据 # dropout #112*112*64 #56*56*128 #28*28*256 #14*14*512 #7*7*512 # 25088 = 7*7*512 # 定义交叉熵损失函数 | 2.259641 | 2 |
LEGEND/modules/_exec.py | RAJESHSAINI2113/LEGENDX | 2 | 9444 | import subprocess
from LEGEND import tbot as bot
from LEGEND import tbot as borg
from LEGEND.events import register
from LEGEND import OWNER_ID, SUDO_USERS
import asyncio
import traceback
import io
import os
import sys
import time
from telethon.tl import functions
from telethon.tl import types
from telethon.tl.types import *
from telethon.errors import *
@register(pattern="^/bash (.*)")
async def msg(event):
if event.sender_id == OWNER_ID:
pass
else:
return
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
e = stderr.decode()
if not e:
e = "No Error"
o = stdout.decode()
if not o:
o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
else:
_o = o.split("\n")
o = "`\n".join(_o)
await event.reply(f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
)
@register(pattern="^/eval")
async def _(event):
if event.sender_id == OWNER_ID:
pass
elif event.sender_id in SUDO_USERS:
pass
else:
return
cmd = event.text.split(" ", maxsplit=1)[1]
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
old_stderr = sys.stderr
old_stdout = sys.stdout
redirected_output = sys.stdout = io.StringIO()
redirected_error = sys.stderr = io.StringIO()
stdout, stderr, exc = None, None, None
try:
await aexec(cmd, event)
except Exception:
exc = traceback.format_exc()
stdout = redirected_output.getvalue()
stderr = redirected_error.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
evaluation = ""
if exc:
evaluation = exc
elif stderr:
evaluation = stderr
elif stdout:
evaluation = stdout
else:
evaluation = "Success"
final_output = "**EVAL**: `{}` \n\n **OUTPUT**: \n`{}` \n".format(cmd, evaluation)
MAX_MESSAGE_SIZE_LIMIT = 4095
if len(final_output) > MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(final_output)) as out_file:
out_file.name = "eval.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id,
)
else:
await event.reply(final_output)
async def aexec(code, smessatatus):
message = event = smessatatus
def p(_x):
return print(slitu.yaml_format(_x))
reply = await event.get_reply_message()
exec(
"async def __aexec(message, reply, client, p): "
+ "\n event = smessatatus = message"
+ "".join(f"\n {l}" for l in code.split("\n"))
)
return await locals()["__aexec"](message, reply, bot, p)
| import subprocess
from LEGEND import tbot as bot
from LEGEND import tbot as borg
from LEGEND.events import register
from LEGEND import OWNER_ID, SUDO_USERS
import asyncio
import traceback
import io
import os
import sys
import time
from telethon.tl import functions
from telethon.tl import types
from telethon.tl.types import *
from telethon.errors import *
@register(pattern="^/bash (.*)")
async def msg(event):
if event.sender_id == OWNER_ID:
pass
else:
return
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
e = stderr.decode()
if not e:
e = "No Error"
o = stdout.decode()
if not o:
o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
else:
_o = o.split("\n")
o = "`\n".join(_o)
await event.reply(f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
)
@register(pattern="^/eval")
async def _(event):
if event.sender_id == OWNER_ID:
pass
elif event.sender_id in SUDO_USERS:
pass
else:
return
cmd = event.text.split(" ", maxsplit=1)[1]
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
old_stderr = sys.stderr
old_stdout = sys.stdout
redirected_output = sys.stdout = io.StringIO()
redirected_error = sys.stderr = io.StringIO()
stdout, stderr, exc = None, None, None
try:
await aexec(cmd, event)
except Exception:
exc = traceback.format_exc()
stdout = redirected_output.getvalue()
stderr = redirected_error.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
evaluation = ""
if exc:
evaluation = exc
elif stderr:
evaluation = stderr
elif stdout:
evaluation = stdout
else:
evaluation = "Success"
final_output = "**EVAL**: `{}` \n\n **OUTPUT**: \n`{}` \n".format(cmd, evaluation)
MAX_MESSAGE_SIZE_LIMIT = 4095
if len(final_output) > MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(final_output)) as out_file:
out_file.name = "eval.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id,
)
else:
await event.reply(final_output)
async def aexec(code, smessatatus):
message = event = smessatatus
def p(_x):
return print(slitu.yaml_format(_x))
reply = await event.get_reply_message()
exec(
"async def __aexec(message, reply, client, p): "
+ "\n event = smessatatus = message"
+ "".join(f"\n {l}" for l in code.split("\n"))
)
return await locals()["__aexec"](message, reply, bot, p)
| none | 1 | 1.930555 | 2 |
|
src/tools/pch.py | MaxSac/build | 11,356 | 9445 | # Status: Being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (c) 2005 <NAME>.
# Copyright 2006 <NAME>
# Copyright (c) 2008 <NAME>
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##### Using Precompiled Headers (Quick Guide) #####
#
# Make precompiled mypch.hpp:
#
# import pch ;
#
# cpp-pch mypch
# : # sources
# mypch.hpp
# : # requirements
# <toolset>msvc:<source>mypch.cpp
# ;
#
# Add cpp-pch to sources:
#
# exe hello
# : main.cpp hello.cpp mypch
# ;
from b2.build import type, feature, generators
from b2.tools import builtin
type.register('PCH', ['pch'])
type.register('C_PCH', [], 'PCH')
type.register('CPP_PCH', [], 'PCH')
# Control precompiled header (PCH) generation.
feature.feature('pch',
['on', 'off'],
['propagated'])
feature.feature('pch-header', [], ['free', 'dependency'])
feature.feature('pch-file', [], ['free', 'dependency'])
class PchGenerator(generators.Generator):
"""
Base PCH generator. The 'run' method has the logic to prevent this generator
from being run unless it's being used for a top-level PCH target.
"""
def action_class(self):
return builtin.CompileAction
def run(self, project, name, prop_set, sources):
if not name:
# Unless this generator is invoked as the top-most generator for a
# main target, fail. This allows using 'H' type as input type for
# this generator, while preventing Boost.Build from trying this generator
# when not explicitly asked for.
#
# One bad example is msvc, where pch generator produces both PCH
# target and OBJ target, so if there's any header generated (like by
# bison, or by msidl), we'd try to use pch generator to get OBJ from
# that H, which is completely wrong. By restricting this generator
# only to pch main target, such problem is solved.
pass
else:
r = self.run_pch(project, name,
prop_set.add_raw(['<define>BOOST_BUILD_PCH_ENABLED']),
sources)
return generators.add_usage_requirements(
r, ['<define>BOOST_BUILD_PCH_ENABLED'])
# This rule must be overridden by the derived classes.
def run_pch(self, project, name, prop_set, sources):
pass
# NOTE: requirements are empty, default pch generator can be applied when
# pch=off.
generators.register(builtin.DummyGenerator(
"pch.default-c-pch-generator", False, [], ['C_PCH'], []))
generators.register(builtin.DummyGenerator(
"pch.default-cpp-pch-generator", False, [], ['CPP_PCH'], []))
| # Status: Being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (c) 2005 <NAME>.
# Copyright 2006 <NAME>
# Copyright (c) 2008 <NAME>
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##### Using Precompiled Headers (Quick Guide) #####
#
# Make precompiled mypch.hpp:
#
# import pch ;
#
# cpp-pch mypch
# : # sources
# mypch.hpp
# : # requirements
# <toolset>msvc:<source>mypch.cpp
# ;
#
# Add cpp-pch to sources:
#
# exe hello
# : main.cpp hello.cpp mypch
# ;
from b2.build import type, feature, generators
from b2.tools import builtin
type.register('PCH', ['pch'])
type.register('C_PCH', [], 'PCH')
type.register('CPP_PCH', [], 'PCH')
# Control precompiled header (PCH) generation.
feature.feature('pch',
['on', 'off'],
['propagated'])
feature.feature('pch-header', [], ['free', 'dependency'])
feature.feature('pch-file', [], ['free', 'dependency'])
class PchGenerator(generators.Generator):
"""
Base PCH generator. The 'run' method has the logic to prevent this generator
from being run unless it's being used for a top-level PCH target.
"""
def action_class(self):
return builtin.CompileAction
def run(self, project, name, prop_set, sources):
if not name:
# Unless this generator is invoked as the top-most generator for a
# main target, fail. This allows using 'H' type as input type for
# this generator, while preventing Boost.Build from trying this generator
# when not explicitly asked for.
#
# One bad example is msvc, where pch generator produces both PCH
# target and OBJ target, so if there's any header generated (like by
# bison, or by msidl), we'd try to use pch generator to get OBJ from
# that H, which is completely wrong. By restricting this generator
# only to pch main target, such problem is solved.
pass
else:
r = self.run_pch(project, name,
prop_set.add_raw(['<define>BOOST_BUILD_PCH_ENABLED']),
sources)
return generators.add_usage_requirements(
r, ['<define>BOOST_BUILD_PCH_ENABLED'])
# This rule must be overridden by the derived classes.
def run_pch(self, project, name, prop_set, sources):
pass
# NOTE: requirements are empty, default pch generator can be applied when
# pch=off.
generators.register(builtin.DummyGenerator(
"pch.default-c-pch-generator", False, [], ['C_PCH'], []))
generators.register(builtin.DummyGenerator(
"pch.default-cpp-pch-generator", False, [], ['CPP_PCH'], []))
| en | 0.802147 | # Status: Being ported by Steven Watanabe # Base revision: 47077 # # Copyright (c) 2005 <NAME>. # Copyright 2006 <NAME> # Copyright (c) 2008 <NAME> # # Use, modification and distribution is subject to the Boost Software # License Version 1.0. (See accompanying file LICENSE_1_0.txt or # http://www.boost.org/LICENSE_1_0.txt) ##### Using Precompiled Headers (Quick Guide) ##### # # Make precompiled mypch.hpp: # # import pch ; # # cpp-pch mypch # : # sources # mypch.hpp # : # requiremnts # <toolset>msvc:<source>mypch.cpp # ; # # Add cpp-pch to sources: # # exe hello # : main.cpp hello.cpp mypch # ; # Control precompiled header (PCH) generation. Base PCH generator. The 'run' method has the logic to prevent this generator from being run unless it's being used for a top-level PCH target. # Unless this generator is invoked as the top-most generator for a # main target, fail. This allows using 'H' type as input type for # this generator, while preventing Boost.Build to try this generator # when not explicitly asked for. # # One bad example is msvc, where pch generator produces both PCH # target and OBJ target, so if there's any header generated (like by # bison, or by msidl), we'd try to use pch generator to get OBJ from # that H, which is completely wrong. By restricting this generator # only to pch main target, such problem is solved. # This rule must be overridden by the derived classes. # NOTE: requirements are empty, default pch generator can be applied when # pch=off. | 1.802533 | 2 |
packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py | GitHK/osparc-simcore-forked | 0 | 9446 | import re
from typing import Dict
from aiohttp import web
from yarl import URL
from simcore_service_webserver.db_models import UserRole, UserStatus
from simcore_service_webserver.login.cfg import cfg, get_storage
from simcore_service_webserver.login.registration import create_invitation
from simcore_service_webserver.login.utils import encrypt_password, get_random_string
from .utils_assert import assert_status
TEST_MARKS = re.compile(r"TEST (\w+):(.*)")
def parse_test_marks(text):
"""Checs for marks as
TEST name:123123
TEST link:some-value
"""
marks = {}
for m in TEST_MARKS.finditer(text):
key, value = m.groups()
marks[key] = value.strip()
return marks
def parse_link(text):
link = parse_test_marks(text)["link"]
return URL(link).path
async def create_user(data=None) -> Dict:
data = data or {}
password = <PASSWORD>(10)
params = {
"name": get_random_string(10),
"email": <EMAIL>".format(get_random_string(10)),
"password_hash": <PASSWORD>(password),
}
params.update(data)
params.setdefault("status", UserStatus.ACTIVE.name)
params.setdefault("role", UserRole.USER.name)
params.setdefault("created_ip", "127.0.0.1")
user = await cfg.STORAGE.create_user(params)
user["raw_password"] = password
return user
async def log_client_in(client, user_data=None, *, enable_check=True) -> Dict:
# creates user directly in db
user = await create_user(user_data)
# login
url = client.app.router["auth_login"].url_for()
r = await client.post(
url,
json={
"email": user["email"],
"password": user["<PASSWORD>_password"],
},
)
if enable_check:
await assert_status(r, web.HTTPOk, cfg.MSG_LOGGED_IN)
return user
class NewUser:
def __init__(self, params=None, app: web.Application = None):
self.params = params
self.user = None
self.db = get_storage(app) if app else cfg.STORAGE # FIXME:
async def __aenter__(self):
self.user = await create_user(self.params)
return self.user
async def __aexit__(self, *args):
await self.db.delete_user(self.user)
class LoggedUser(NewUser):
def __init__(self, client, params=None, *, check_if_succeeds=True):
super().__init__(params, client.app)
self.client = client
self.enable_check = check_if_succeeds
async def __aenter__(self):
self.user = await log_client_in(
self.client, self.params, enable_check=self.enable_check
)
return self.user
class NewInvitation(NewUser):
def __init__(self, client, guest="", host=None):
super().__init__(host, client.app)
self.client = client
self.guest = guest or get_random_string(10)
self.confirmation = None
async def __aenter__(self):
# creates host user
self.user = await create_user(self.params)
self.confirmation = await create_invitation(self.user, self.guest, self.db)
return self.confirmation
async def __aexit__(self, *args):
if await self.db.get_confirmation(self.confirmation):
await self.db.delete_confirmation(self.confirmation)
| import re
from typing import Dict
from aiohttp import web
from yarl import URL
from simcore_service_webserver.db_models import UserRole, UserStatus
from simcore_service_webserver.login.cfg import cfg, get_storage
from simcore_service_webserver.login.registration import create_invitation
from simcore_service_webserver.login.utils import encrypt_password, get_random_string
from .utils_assert import assert_status
TEST_MARKS = re.compile(r"TEST (\w+):(.*)")
def parse_test_marks(text):
"""Checs for marks as
TEST name:123123
TEST link:some-value
"""
marks = {}
for m in TEST_MARKS.finditer(text):
key, value = m.groups()
marks[key] = value.strip()
return marks
def parse_link(text):
link = parse_test_marks(text)["link"]
return URL(link).path
async def create_user(data=None) -> Dict:
data = data or {}
password = <PASSWORD>(10)
params = {
"name": get_random_string(10),
"email": <EMAIL>".format(get_random_string(10)),
"password_hash": <PASSWORD>(password),
}
params.update(data)
params.setdefault("status", UserStatus.ACTIVE.name)
params.setdefault("role", UserRole.USER.name)
params.setdefault("created_ip", "127.0.0.1")
user = await cfg.STORAGE.create_user(params)
user["raw_password"] = password
return user
async def log_client_in(client, user_data=None, *, enable_check=True) -> Dict:
# creates user directly in db
user = await create_user(user_data)
# login
url = client.app.router["auth_login"].url_for()
r = await client.post(
url,
json={
"email": user["email"],
"password": user["<PASSWORD>_password"],
},
)
if enable_check:
await assert_status(r, web.HTTPOk, cfg.MSG_LOGGED_IN)
return user
class NewUser:
def __init__(self, params=None, app: web.Application = None):
self.params = params
self.user = None
self.db = get_storage(app) if app else cfg.STORAGE # FIXME:
async def __aenter__(self):
self.user = await create_user(self.params)
return self.user
async def __aexit__(self, *args):
await self.db.delete_user(self.user)
class LoggedUser(NewUser):
def __init__(self, client, params=None, *, check_if_succeeds=True):
super().__init__(params, client.app)
self.client = client
self.enable_check = check_if_succeeds
async def __aenter__(self):
self.user = await log_client_in(
self.client, self.params, enable_check=self.enable_check
)
return self.user
class NewInvitation(NewUser):
def __init__(self, client, guest="", host=None):
super().__init__(host, client.app)
self.client = client
self.guest = guest or get_random_string(10)
self.confirmation = None
async def __aenter__(self):
# creates host user
self.user = await create_user(self.params)
self.confirmation = await create_invitation(self.user, self.guest, self.db)
return self.confirmation
async def __aexit__(self, *args):
if await self.db.get_confirmation(self.confirmation):
await self.db.delete_confirmation(self.confirmation)
| en | 0.819756 | Checks for marks as TEST name:123123 TEST link:some-value # creates user directly in db # login # FIXME: # creates host user | 2.213646 | 2 |
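The parse_test_marks helper in the row above scrapes lines of the form TEST <key>:<value> out of the e-mails the login flow sends during tests. A self-contained sketch of what it extracts (the sample body is made up; the regex is the one from the row):

import re
from yarl import URL

TEST_MARKS = re.compile(r"TEST (\w+):(.*)")

body = "Welcome!\nTEST name:alice\nTEST link:http://localhost/v0/auth/confirmation/12345\n"
marks = {m.group(1): m.group(2).strip() for m in TEST_MARKS.finditer(body)}
print(marks)                    # {'name': 'alice', 'link': 'http://localhost/v0/auth/confirmation/12345'}
print(URL(marks["link"]).path)  # /v0/auth/confirmation/12345 -- what parse_link() would return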
indra/tests/test_sparser.py | jmuhlich/indra | 0 | 9447 | <reponame>jmuhlich/indra
from indra import sparser
xml_str1 = '''
<article pmid="54321">
<interpretation>
<sentence-text>MEK1 phosphorylates ERK1</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="agent">
<ref category="protein">
<var name="name">MP2K1_HUMAN</var>
<var name="uid">UP:MP2K1_HUMAN</var>
</ref>
</var>
<var name="substrate">
<ref category="protein">
<var name="name">MK03_HUMAN</var>
<var name="uid">UP:MK03_HUMAN</var>
</ref>
</var>
<var name="present"><ref category="present"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
xml_str2 = '''
<article pmid="12345">
<interpretation>
<sentence-text>Hence ASPP2 can be phosphorylated at serine 827 by MAPK1 in vitro</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="subordinate-conjunction">
<ref category="subordinate-conjunction"><var name="word">hence</var></ref></var>
<var name="substrate">
<ref category="protein">
<var name="name">ASPP2_HUMAN</var>
<var name="uid">UP:ASPP2_HUMAN</var>
</ref>
</var>
<var name="agent">
<ref category="protein">
<var name="context">
<ref category="in-vitro"></ref>
</var>
<var name="uid">UP:MK01_HUMAN</var>
<var name="name">MK01_HUMAN</var>
</ref>
</var>
<var name="site">
<ref category="residue-on-protein">
<var name="amino-acid">
<ref category="amino-acid"><var name="name">serine</var></ref>
</var>
<var name="position"> 827</var>
</ref>
</var>
<var name="modal"><ref category="can"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
def test_invalid_xml():
sp = sparser.process_xml('xyz')
assert(sp is None)
def test_phosphorylation():
sp = sparser.process_xml(xml_str1)
assert(len(sp.statements) == 1)
assert(sp.statements[0].enz.name == 'MAP2K1')
assert(sp.statements[0].sub.name == 'MAPK3')
assert(len(sp.statements[0].evidence) == 1)
ev = sp.statements[0].evidence[0]
assert(ev.pmid == '54321')
assert(ev.text)
assert(ev.source_api == 'sparser')
def test_phosphorylation2():
sp = sparser.process_xml(xml_str2)
assert(len(sp.statements) == 1)
assert(sp.statements[0].enz.name == 'MAPK1')
assert(sp.statements[0].sub.name == 'TP53BP2')
assert(sp.statements[0].residue == 'S')
assert(sp.statements[0].position == '827')
assert (len(sp.statements[0].evidence) == 1)
ev = sp.statements[0].evidence[0]
assert (ev.pmid == '12345')
assert (ev.text)
assert (ev.source_api == 'sparser')
| from indra import sparser
xml_str1 = '''
<article pmid="54321">
<interpretation>
<sentence-text>MEK1 phosphorylates ERK1</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="agent">
<ref category="protein">
<var name="name">MP2K1_HUMAN</var>
<var name="uid">UP:MP2K1_HUMAN</var>
</ref>
</var>
<var name="substrate">
<ref category="protein">
<var name="name">MK03_HUMAN</var>
<var name="uid">UP:MK03_HUMAN</var>
</ref>
</var>
<var name="present"><ref category="present"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
xml_str2 = '''
<article pmid="12345">
<interpretation>
<sentence-text>Hence ASPP2 can be phosphorylated at serine 827 by MAPK1 in vitro</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="subordinate-conjunction">
<ref category="subordinate-conjunction"><var name="word">hence</var></ref></var>
<var name="substrate">
<ref category="protein">
<var name="name">ASPP2_HUMAN</var>
<var name="uid">UP:ASPP2_HUMAN</var>
</ref>
</var>
<var name="agent">
<ref category="protein">
<var name="context">
<ref category="in-vitro"></ref>
</var>
<var name="uid">UP:MK01_HUMAN</var>
<var name="name">MK01_HUMAN</var>
</ref>
</var>
<var name="site">
<ref category="residue-on-protein">
<var name="amino-acid">
<ref category="amino-acid"><var name="name">serine</var></ref>
</var>
<var name="position"> 827</var>
</ref>
</var>
<var name="modal"><ref category="can"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
def test_invalid_xml():
sp = sparser.process_xml('xyz')
assert(sp is None)
def test_phosphorylation():
sp = sparser.process_xml(xml_str1)
assert(len(sp.statements) == 1)
assert(sp.statements[0].enz.name == 'MAP2K1')
assert(sp.statements[0].sub.name == 'MAPK3')
assert(len(sp.statements[0].evidence) == 1)
ev = sp.statements[0].evidence[0]
assert(ev.pmid == '54321')
assert(ev.text)
assert(ev.source_api == 'sparser')
def test_phosphorylation2():
sp = sparser.process_xml(xml_str2)
assert(len(sp.statements) == 1)
assert(sp.statements[0].enz.name == 'MAPK1')
assert(sp.statements[0].sub.name == 'TP53BP2')
assert(sp.statements[0].residue == 'S')
assert(sp.statements[0].position == '827')
assert (len(sp.statements[0].evidence) == 1)
ev = sp.statements[0].evidence[0]
assert (ev.pmid == '12345')
assert (ev.text)
assert (ev.source_api == 'sparser') | en | 0.235092 | <article pmid="54321"> <interpretation> <sentence-text>MEK1 phosphorylates ERK1</sentence-text> <sem> <ref category="phosphorylate"> <var name="agent"> <ref category="protein"> <var name="name">MP2K1_HUMAN</var> <var name="uid">UP:MP2K1_HUMAN</var> </ref> </var> <var name="substrate"> <ref category="protein"> <var name="name">MK03_HUMAN</var> <var name="uid">UP:MK03_HUMAN</var> </ref> </var> <var name="present"><ref category="present"></ref></var> </ref> </sem> </interpretation> </article> <article pmid="12345"> <interpretation> <sentence-text>Hence ASPP2 can be phosphorylated at serine 827 by MAPK1 in vitro</sentence-text> <sem> <ref category="phosphorylate"> <var name="subordinate-conjunction"> <ref category="subordinate-conjunction"><var name="word">hence</var></ref></var> <var name="substrate"> <ref category="protein"> <var name="name">ASPP2_HUMAN</var> <var name="uid">UP:ASPP2_HUMAN</var> </ref> </var> <var name="agent"> <ref category="protein"> <var name="context"> <ref category="in-vitro"></ref> </var> <var name="uid">UP:MK01_HUMAN</var> <var name="name">MK01_HUMAN</var> </ref> </var> <var name="site"> <ref category="residue-on-protein"> <var name="amino-acid"> <ref category="amino-acid"><var name="name">serine</var></ref> </var> <var name="position"> 827</var> </ref> </var> <var name="modal"><ref category="can"></ref></var> </ref> </sem> </interpretation> </article> | 2.14343 | 2 |
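The tests in the row above double as a recipe for INDRA's Sparser output processor. The same flow outside a test, assuming the indra package is installed and xml_str1 is the string defined in that row:

from indra import sparser

sp = sparser.process_xml(xml_str1)   # returns None for XML it cannot parse
if sp is not None:
    for stmt in sp.statements:
        ev = stmt.evidence[0]
        print(f"{stmt.enz.name} phosphorylates {stmt.sub.name} (pmid {ev.pmid}, via {ev.source_api})")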
examples/quickstart/run_example.py | siforrer/coreali | 0 | 9448 | """ Simple Example using coreali to access a register model. Needs no hardware"""
# Import dependencies and compile register model with systemrdl-compiler
from systemrdl import RDLCompiler
import coreali
import numpy as np
import os
from coreali import RegisterModel
rdlc = RDLCompiler()
rdlc.compile_file(os.path.dirname(__file__)+"/../systemrdl/logger.rdl")
root = rdlc.elaborate()
# Generate hierarchical register model
rio = coreali.registerio.RegIoNoHW(np.zeros([256], np.uint8()))
logger = RegisterModel(root, rio)
# Use the generated register model
logger.Ctrl.read()
logger.LogMem.write(0,[1,2,3])
logger.LogMem.read()
logger.LogMem[1].write(0,[11,12,13])
print(logger)
| """ Simple Example using coreali to access a register model. Needs no h^ardware"""
# Import dependencies and compile register model with systemrdl-compiler
from systemrdl import RDLCompiler
import coreali
import numpy as np
import os
from coreali import RegisterModel
rdlc = RDLCompiler()
rdlc.compile_file(os.path.dirname(__file__)+"/../systemrdl/logger.rdl")
root = rdlc.elaborate()
# Generate hierarchical register model
rio = coreali.registerio.RegIoNoHW(np.zeros([256], np.uint8()))
logger = RegisterModel(root, rio)
# Use the generated register model
logger.Ctrl.read()
logger.LogMem.write(0,[1,2,3])
logger.LogMem.read()
logger.LogMem[1].write(0,[11,12,13])
print(logger)
| en | 0.66028 | Simple Example using coreali to access a register model. Needs no hardware # Import dependencies and compile register model with systemrdl-compiler # Generate hierarchical register model # Use the generated register model | 2.169975 | 2 |
src/python/pants/base/specs.py | mcguigan/pants | 0 | 9449 | <reponame>mcguigan/pants
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pants.engine.fs import PathGlobs
from pants.engine.objects import Collection
from pants.option.custom_types import GlobExpansionConjunction
from pants.option.global_options import GlobMatchErrorBehavior
from pants.util.collections import assert_single_element
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.filtering import create_filters, wrap_filters
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.mapper import AddressFamily, AddressMapper
class Spec(ABC):
"""A specification for what Pants should operate on."""
@abstractmethod
def to_spec_string(self) -> str:
"""Return the normalized string representation of this spec."""
class AddressSpec(Spec, metaclass=ABCMeta):
"""Represents address selectors as passed from the command line.
Supports `Single` target addresses as well as `Sibling` (:) and `Descendant` (::) selector forms.
Note: In general, 'spec' should not be a user visible term, it is usually appropriate to
substitute 'address' for a spec resolved to an address, or 'address selector' if you are
referring to an unresolved spec string.
"""
class AddressFamilyResolutionError(Exception):
pass
@abstractmethod
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
"""Given a dict of (namespace path) -> AddressFamily, return the values matching this address
spec.
:raises: :class:`AddressSpec.AddressFamilyResolutionError` if no address families matched this spec.
"""
@classmethod
def address_families_for_dir(
cls, address_families_dict: Dict[str, "AddressFamily"], spec_dir_path: str
) -> List["AddressFamily"]:
"""Implementation of `matching_address_families()` for address specs matching at most
one directory."""
maybe_af = address_families_dict.get(spec_dir_path, None)
if maybe_af is None:
raise cls.AddressFamilyResolutionError(
'Path "{}" does not contain any BUILD files.'
.format(spec_dir_path))
return [maybe_af]
class AddressResolutionError(Exception):
pass
@abstractmethod
def address_target_pairs_from_address_families(self, address_families: List["AddressFamily"]):
"""Given a list of AddressFamily, return (address, target) pairs matching this address spec.
:raises: :class:`SingleAddress._SingleAddressResolutionError` for resolution errors with a
:class:`SingleAddress` instance.
:raises: :class:`AddressSpec.AddressResolutionError` if no targets could be found otherwise, if
the address spec type requires a non-empty set of targets.
:return: list of (Address, Target) pairs.
"""
@classmethod
def all_address_target_pairs(cls, address_families):
"""Implementation of `address_target_pairs_from_address_families()` which does no filtering."""
addr_tgt_pairs = []
for af in address_families:
addr_tgt_pairs.extend(af.addressables.items())
return addr_tgt_pairs
@abstractmethod
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
"""Generate glob patterns matching exactly all the BUILD files this address spec covers."""
@classmethod
def globs_in_single_dir(cls, spec_dir_path: str, address_mapper: "AddressMapper") -> List[str]:
"""Implementation of `make_glob_patterns()` which only allows a single base directory."""
return [os.path.join(spec_dir_path, pat) for pat in address_mapper.build_patterns]
@dataclass(frozen=True)
class SingleAddress(AddressSpec):
"""An AddressSpec for a single address."""
directory: str
name: str
def __post_init__(self) -> None:
if self.directory is None:
raise ValueError(f'A SingleAddress must have a directory. Got: {self}')
if self.name is None:
raise ValueError(f'A SingleAddress must have a name. Got: {self}')
def to_spec_string(self) -> str:
return '{}:{}'.format(self.directory, self.name)
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"]
) -> List["AddressFamily"]:
return self.address_families_for_dir(address_families_dict, self.directory)
class _SingleAddressResolutionError(Exception):
def __init__(self, single_address_family: "AddressFamily", name: str) -> None:
super().__init__()
self.single_address_family = single_address_family
self.name = name
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
"""Return the pair for the single target matching the single AddressFamily, or error.
:raises: :class:`SingleAddress._SingleAddressResolutionError` if no targets could be found for a
:class:`SingleAddress` instance.
:return: list of (Address, Target) pairs with exactly one element.
"""
single_af = assert_single_element(address_families)
addr_tgt_pairs = [
(addr, tgt) for addr, tgt in single_af.addressables.items()
if addr.target_name == self.name
]
if len(addr_tgt_pairs) == 0:
raise self._SingleAddressResolutionError(single_af, self.name)
# There will be at most one target with a given name in a single AddressFamily.
assert(len(addr_tgt_pairs) == 1)
return addr_tgt_pairs
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class SiblingAddresses(AddressSpec):
"""An AddressSpec representing all addresses located directly within the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}:'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return self.address_families_for_dir(address_families_dict, self.directory)
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
return self.all_address_target_pairs(address_families)
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class DescendantAddresses(AddressSpec):
"""An AddressSpec representing all addresses located recursively under the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}::'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return [
af for ns, af in address_families_dict.items()
if fast_relpath_optional(ns, self.directory) is not None
]
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
addr_tgt_pairs = self.all_address_target_pairs(address_families)
if len(addr_tgt_pairs) == 0:
raise self.AddressResolutionError('AddressSpec {} does not match any targets.'.format(self))
return addr_tgt_pairs
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return [os.path.join(self.directory, '**', pat) for pat in address_mapper.build_patterns]
@dataclass(frozen=True)
class AscendantAddresses(AddressSpec):
"""An AddressSpec representing all addresses located recursively _above_ the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}^'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return [
af for ns, af in address_families_dict.items()
if fast_relpath_optional(self.directory, ns) is not None
]
def address_target_pairs_from_address_families(self, address_families):
return self.all_address_target_pairs(address_families)
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return [
os.path.join(f, pattern)
for pattern in address_mapper.build_patterns
for f in recursive_dirname(self.directory)
]
_specificity = {
SingleAddress: 0,
SiblingAddresses: 1,
AscendantAddresses: 2,
DescendantAddresses: 3,
type(None): 99
}
def more_specific(
address_spec1: Optional[AddressSpec], address_spec2: Optional[AddressSpec]
) -> AddressSpec:
"""Returns which of the two specs is more specific.
This is useful when a target matches multiple specs, and we want to associate it with
the "most specific" one, which will make the most intuitive sense to the user.
"""
# Note that if either of spec1 or spec2 is None, the other will be returned.
if address_spec1 is None and address_spec2 is None:
raise ValueError('internal error: both specs provided to more_specific() were None')
return cast(
AddressSpec,
address_spec1 if _specificity[type(address_spec1)] < _specificity[type(address_spec2)] else address_spec2
)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecsMatcher:
"""Contains filters for the output of a AddressSpecs match.
This class is separated out from `AddressSpecs` to allow for both structural equality of the `tags` and
`exclude_patterns`, and for caching of their compiled forms using `@memoized_property` (which uses
the hash of the class instance in its key, and results in a very large key when used with
`AddressSpecs` directly).
"""
tags: Tuple[str, ...]
exclude_patterns: Tuple[str, ...]
def __init__(
self, tags: Optional[Iterable[str]] = None, exclude_patterns: Optional[Iterable[str]] = None,
) -> None:
self.tags = tuple(tags or [])
self.exclude_patterns = tuple(exclude_patterns or [])
@memoized_property
def _exclude_compiled_regexps(self):
return [re.compile(pattern) for pattern in set(self.exclude_patterns or [])]
def _excluded_by_pattern(self, address):
return any(p.search(address.spec) is not None for p in self._exclude_compiled_regexps)
@memoized_property
def _target_tag_matches(self):
def filter_for_tag(tag):
return lambda t: tag in [str(t_tag) for t_tag in t.kwargs().get("tags", [])]
return wrap_filters(create_filters(self.tags, filter_for_tag))
def matches_target_address_pair(self, address, target):
"""
:param Address address: An Address to match
:param HydratedTarget target: The Target for the address.
:return: True if the given Address/HydratedTarget are included by this matcher.
"""
return self._target_tag_matches(target) and not self._excluded_by_pattern(address)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecs:
"""A collection of `AddressSpec`s representing AddressSpec subclasses, and a AddressSpecsMatcher
to filter results."""
dependencies: Tuple[AddressSpec, ...]
matcher: AddressSpecsMatcher
def __init__(
self,
dependencies: Iterable[AddressSpec],
tags: Optional[Iterable[str]] = None,
exclude_patterns: Optional[Iterable[str]] = None,
) -> None:
self.dependencies = tuple(dependencies)
self.matcher = AddressSpecsMatcher(tags=tags, exclude_patterns=exclude_patterns)
def __iter__(self) -> Iterator[AddressSpec]:
return iter(self.dependencies)
class FilesystemSpec(Spec, metaclass=ABCMeta):
pass
@dataclass(frozen=True)
class FilesystemLiteralSpec(FilesystemSpec):
"""A literal file name, e.g. `foo.py`."""
file: str
def to_spec_string(self) -> str:
return self.file
@dataclass(frozen=True)
class FilesystemGlobSpec(FilesystemSpec):
"""A spec with a glob or globs, e.g. `*.py` and `**/*.java`."""
glob: str
def to_spec_string(self) -> str:
return self.glob
@dataclass(frozen=True)
class FilesystemIgnoreSpec(FilesystemSpec):
"""A spec to ignore certain files or globs."""
glob: str
def __post_init__(self) -> None:
if self.glob.startswith("!"):
raise ValueError(f"The `glob` for {self} should not start with `!`.")
def to_spec_string(self) -> str:
return f"!{self.glob}"
class FilesystemSpecs(Collection[FilesystemSpec]):
@memoized_property
def includes(self) -> Tuple[Union[FilesystemLiteralSpec, FilesystemGlobSpec], ...]:
return tuple(
spec for spec in self.dependencies
if isinstance(spec, (FilesystemGlobSpec, FilesystemLiteralSpec))
)
@memoized_property
def ignores(self) -> Tuple[FilesystemIgnoreSpec, ...]:
return tuple(spec for spec in self.dependencies if isinstance(spec, FilesystemIgnoreSpec))
@staticmethod
def _generate_path_globs(specs: Iterable[FilesystemSpec]) -> PathGlobs:
return PathGlobs(
globs=(s.to_spec_string() for s in specs),
# We error on unmatched globs for consistency with unmatched address specs. This also
# ensures that scripts don't silently do the wrong thing.
glob_match_error_behavior=GlobMatchErrorBehavior.error,
# We validate that _every_ glob is valid.
conjunction=GlobExpansionConjunction.all_match,
description_of_origin="file arguments",
)
def path_globs_for_spec(
self, spec: Union[FilesystemLiteralSpec, FilesystemGlobSpec]
) -> PathGlobs:
"""Generate PathGlobs for the specific spec, automatically including the instance's
FilesystemIgnoreSpecs.
"""
return self._generate_path_globs(specs=(spec, *self.ignores))
def to_path_globs(self) -> PathGlobs:
"""Generate a single PathGlobs for the instance."""
return self._generate_path_globs(specs=(*self.includes, *self.ignores))
class AmbiguousSpecs(Exception):
pass
@dataclass(frozen=True)
class Specs:
address_specs: AddressSpecs
filesystem_specs: FilesystemSpecs
def __post_init__(self) -> None:
if self.address_specs.dependencies and self.filesystem_specs.dependencies:
raise AmbiguousSpecs(
"Both address specs and filesystem specs given. Please use only one type of spec.\n\n"
f"Address specs: {', '.join(spec.to_spec_string() for spec in self.address_specs)}\n"
f"Filesystem specs: {', '.join(spec.to_spec_string() for spec in self.filesystem_specs)}"
)
@property
def provided_specs(self) -> Union[AddressSpecs, FilesystemSpecs]:
"""Return whichever types of specs was provided by the user.
It is guaranteed that there will only ever be AddressSpecs or FilesystemSpecs, but not both,
through validation in the constructor."""
return (
self.filesystem_specs
if self.filesystem_specs.dependencies
else self.address_specs
)
| # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pants.engine.fs import PathGlobs
from pants.engine.objects import Collection
from pants.option.custom_types import GlobExpansionConjunction
from pants.option.global_options import GlobMatchErrorBehavior
from pants.util.collections import assert_single_element
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.filtering import create_filters, wrap_filters
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.mapper import AddressFamily, AddressMapper
class Spec(ABC):
"""A specification for what Pants should operate on."""
@abstractmethod
def to_spec_string(self) -> str:
"""Return the normalized string representation of this spec."""
class AddressSpec(Spec, metaclass=ABCMeta):
"""Represents address selectors as passed from the command line.
Supports `Single` target addresses as well as `Sibling` (:) and `Descendant` (::) selector forms.
Note: In general, 'spec' should not be a user visible term, it is usually appropriate to
substitute 'address' for a spec resolved to an address, or 'address selector' if you are
referring to an unresolved spec string.
"""
class AddressFamilyResolutionError(Exception):
pass
@abstractmethod
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
"""Given a dict of (namespace path) -> AddressFamily, return the values matching this address
spec.
:raises: :class:`AddressSpec.AddressFamilyResolutionError` if no address families matched this spec.
"""
@classmethod
def address_families_for_dir(
cls, address_families_dict: Dict[str, "AddressFamily"], spec_dir_path: str
) -> List["AddressFamily"]:
"""Implementation of `matching_address_families()` for address specs matching at most
one directory."""
maybe_af = address_families_dict.get(spec_dir_path, None)
if maybe_af is None:
raise cls.AddressFamilyResolutionError(
'Path "{}" does not contain any BUILD files.'
.format(spec_dir_path))
return [maybe_af]
class AddressResolutionError(Exception):
pass
@abstractmethod
def address_target_pairs_from_address_families(self, address_families: List["AddressFamily"]):
"""Given a list of AddressFamily, return (address, target) pairs matching this address spec.
:raises: :class:`SingleAddress._SingleAddressResolutionError` for resolution errors with a
:class:`SingleAddress` instance.
:raises: :class:`AddressSpec.AddressResolutionError` if no targets could be found otherwise, if
the address spec type requires a non-empty set of targets.
:return: list of (Address, Target) pairs.
"""
@classmethod
def all_address_target_pairs(cls, address_families):
"""Implementation of `address_target_pairs_from_address_families()` which does no filtering."""
addr_tgt_pairs = []
for af in address_families:
addr_tgt_pairs.extend(af.addressables.items())
return addr_tgt_pairs
@abstractmethod
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
"""Generate glob patterns matching exactly all the BUILD files this address spec covers."""
@classmethod
def globs_in_single_dir(cls, spec_dir_path: str, address_mapper: "AddressMapper") -> List[str]:
"""Implementation of `make_glob_patterns()` which only allows a single base directory."""
return [os.path.join(spec_dir_path, pat) for pat in address_mapper.build_patterns]
@dataclass(frozen=True)
class SingleAddress(AddressSpec):
"""An AddressSpec for a single address."""
directory: str
name: str
def __post_init__(self) -> None:
if self.directory is None:
raise ValueError(f'A SingleAddress must have a directory. Got: {self}')
if self.name is None:
raise ValueError(f'A SingleAddress must have a name. Got: {self}')
def to_spec_string(self) -> str:
return '{}:{}'.format(self.directory, self.name)
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"]
) -> List["AddressFamily"]:
return self.address_families_for_dir(address_families_dict, self.directory)
class _SingleAddressResolutionError(Exception):
def __init__(self, single_address_family: "AddressFamily", name: str) -> None:
super().__init__()
self.single_address_family = single_address_family
self.name = name
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
"""Return the pair for the single target matching the single AddressFamily, or error.
:raises: :class:`SingleAddress._SingleAddressResolutionError` if no targets could be found for a
:class:`SingleAddress` instance.
:return: list of (Address, Target) pairs with exactly one element.
"""
single_af = assert_single_element(address_families)
addr_tgt_pairs = [
(addr, tgt) for addr, tgt in single_af.addressables.items()
if addr.target_name == self.name
]
if len(addr_tgt_pairs) == 0:
raise self._SingleAddressResolutionError(single_af, self.name)
# There will be at most one target with a given name in a single AddressFamily.
assert(len(addr_tgt_pairs) == 1)
return addr_tgt_pairs
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class SiblingAddresses(AddressSpec):
"""An AddressSpec representing all addresses located directly within the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}:'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return self.address_families_for_dir(address_families_dict, self.directory)
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
return self.all_address_target_pairs(address_families)
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class DescendantAddresses(AddressSpec):
"""An AddressSpec representing all addresses located recursively under the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}::'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return [
af for ns, af in address_families_dict.items()
if fast_relpath_optional(ns, self.directory) is not None
]
def address_target_pairs_from_address_families(self, address_families: Sequence["AddressFamily"]):
addr_tgt_pairs = self.all_address_target_pairs(address_families)
if len(addr_tgt_pairs) == 0:
raise self.AddressResolutionError('AddressSpec {} does not match any targets.'.format(self))
return addr_tgt_pairs
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return [os.path.join(self.directory, '**', pat) for pat in address_mapper.build_patterns]
@dataclass(frozen=True)
class AscendantAddresses(AddressSpec):
"""An AddressSpec representing all addresses located recursively _above_ the given directory."""
directory: str
def to_spec_string(self) -> str:
return f'{self.directory}^'
def matching_address_families(
self, address_families_dict: Dict[str, "AddressFamily"],
) -> List["AddressFamily"]:
return [
af for ns, af in address_families_dict.items()
if fast_relpath_optional(self.directory, ns) is not None
]
def address_target_pairs_from_address_families(self, address_families):
return self.all_address_target_pairs(address_families)
def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
return [
os.path.join(f, pattern)
for pattern in address_mapper.build_patterns
for f in recursive_dirname(self.directory)
]
_specificity = {
SingleAddress: 0,
SiblingAddresses: 1,
AscendantAddresses: 2,
DescendantAddresses: 3,
type(None): 99
}
def more_specific(
address_spec1: Optional[AddressSpec], address_spec2: Optional[AddressSpec]
) -> AddressSpec:
"""Returns which of the two specs is more specific.
This is useful when a target matches multiple specs, and we want to associate it with
the "most specific" one, which will make the most intuitive sense to the user.
"""
# Note that if either of spec1 or spec2 is None, the other will be returned.
if address_spec1 is None and address_spec2 is None:
raise ValueError('internal error: both specs provided to more_specific() were None')
return cast(
AddressSpec,
address_spec1 if _specificity[type(address_spec1)] < _specificity[type(address_spec2)] else address_spec2
)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecsMatcher:
"""Contains filters for the output of a AddressSpecs match.
This class is separated out from `AddressSpecs` to allow for both structural equality of the `tags` and
`exclude_patterns`, and for caching of their compiled forms using `@memoized_property` (which uses
the hash of the class instance in its key, and results in a very large key when used with
`AddressSpecs` directly).
"""
tags: Tuple[str, ...]
exclude_patterns: Tuple[str, ...]
def __init__(
self, tags: Optional[Iterable[str]] = None, exclude_patterns: Optional[Iterable[str]] = None,
) -> None:
self.tags = tuple(tags or [])
self.exclude_patterns = tuple(exclude_patterns or [])
@memoized_property
def _exclude_compiled_regexps(self):
return [re.compile(pattern) for pattern in set(self.exclude_patterns or [])]
def _excluded_by_pattern(self, address):
return any(p.search(address.spec) is not None for p in self._exclude_compiled_regexps)
@memoized_property
def _target_tag_matches(self):
def filter_for_tag(tag):
return lambda t: tag in [str(t_tag) for t_tag in t.kwargs().get("tags", [])]
return wrap_filters(create_filters(self.tags, filter_for_tag))
def matches_target_address_pair(self, address, target):
"""
:param Address address: An Address to match
:param HydratedTarget target: The Target for the address.
:return: True if the given Address/HydratedTarget are included by this matcher.
"""
return self._target_tag_matches(target) and not self._excluded_by_pattern(address)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecs:
"""A collection of `AddressSpec`s representing AddressSpec subclasses, and a AddressSpecsMatcher
to filter results."""
dependencies: Tuple[AddressSpec, ...]
matcher: AddressSpecsMatcher
def __init__(
self,
dependencies: Iterable[AddressSpec],
tags: Optional[Iterable[str]] = None,
exclude_patterns: Optional[Iterable[str]] = None,
) -> None:
self.dependencies = tuple(dependencies)
self.matcher = AddressSpecsMatcher(tags=tags, exclude_patterns=exclude_patterns)
def __iter__(self) -> Iterator[AddressSpec]:
return iter(self.dependencies)
class FilesystemSpec(Spec, metaclass=ABCMeta):
pass
@dataclass(frozen=True)
class FilesystemLiteralSpec(FilesystemSpec):
"""A literal file name, e.g. `foo.py`."""
file: str
def to_spec_string(self) -> str:
return self.file
@dataclass(frozen=True)
class FilesystemGlobSpec(FilesystemSpec):
"""A spec with a glob or globs, e.g. `*.py` and `**/*.java`."""
glob: str
def to_spec_string(self) -> str:
return self.glob
@dataclass(frozen=True)
class FilesystemIgnoreSpec(FilesystemSpec):
"""A spec to ignore certain files or globs."""
glob: str
def __post_init__(self) -> None:
if self.glob.startswith("!"):
raise ValueError(f"The `glob` for {self} should not start with `!`.")
def to_spec_string(self) -> str:
return f"!{self.glob}"
class FilesystemSpecs(Collection[FilesystemSpec]):
@memoized_property
def includes(self) -> Tuple[Union[FilesystemLiteralSpec, FilesystemGlobSpec], ...]:
return tuple(
spec for spec in self.dependencies
if isinstance(spec, (FilesystemGlobSpec, FilesystemLiteralSpec))
)
@memoized_property
def ignores(self) -> Tuple[FilesystemIgnoreSpec, ...]:
return tuple(spec for spec in self.dependencies if isinstance(spec, FilesystemIgnoreSpec))
@staticmethod
def _generate_path_globs(specs: Iterable[FilesystemSpec]) -> PathGlobs:
return PathGlobs(
globs=(s.to_spec_string() for s in specs),
# We error on unmatched globs for consistency with unmatched address specs. This also
# ensures that scripts don't silently do the wrong thing.
glob_match_error_behavior=GlobMatchErrorBehavior.error,
# We validate that _every_ glob is valid.
conjunction=GlobExpansionConjunction.all_match,
description_of_origin="file arguments",
)
def path_globs_for_spec(
self, spec: Union[FilesystemLiteralSpec, FilesystemGlobSpec]
) -> PathGlobs:
"""Generate PathGlobs for the specific spec, automatically including the instance's
FilesystemIgnoreSpecs.
"""
return self._generate_path_globs(specs=(spec, *self.ignores))
def to_path_globs(self) -> PathGlobs:
"""Generate a single PathGlobs for the instance."""
return self._generate_path_globs(specs=(*self.includes, *self.ignores))
class AmbiguousSpecs(Exception):
pass
@dataclass(frozen=True)
class Specs:
address_specs: AddressSpecs
filesystem_specs: FilesystemSpecs
def __post_init__(self) -> None:
if self.address_specs.dependencies and self.filesystem_specs.dependencies:
raise AmbiguousSpecs(
"Both address specs and filesystem specs given. Please use only one type of spec.\n\n"
f"Address specs: {', '.join(spec.to_spec_string() for spec in self.address_specs)}\n"
f"Filesystem specs: {', '.join(spec.to_spec_string() for spec in self.filesystem_specs)}"
)
@property
def provided_specs(self) -> Union[AddressSpecs, FilesystemSpecs]:
"""Return whichever types of specs was provided by the user.
It is guaranteed that there will only ever be AddressSpecs or FilesystemSpecs, but not both,
through validation in the constructor."""
return (
self.filesystem_specs
if self.filesystem_specs.dependencies
else self.address_specs
) | en | 0.859224 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). A specification for what Pants should operate on. Return the normalized string representation of this spec. Represents address selectors as passed from the command line. Supports `Single` target addresses as well as `Sibling` (:) and `Descendant` (::) selector forms. Note: In general, 'spec' should not be a user visible term, it is usually appropriate to substitute 'address' for a spec resolved to an address, or 'address selector' if you are referring to an unresolved spec string. Given a dict of (namespace path) -> AddressFamily, return the values matching this address spec. :raises: :class:`AddressSpec.AddressFamilyResolutionError` if no address families matched this spec. Implementation of `matching_address_families()` for address specs matching at most one directory. Given a list of AddressFamily, return (address, target) pairs matching this address spec. :raises: :class:`SingleAddress._SingleAddressResolutionError` for resolution errors with a :class:`SingleAddress` instance. :raises: :class:`AddressSpec.AddressResolutionError` if no targets could be found otherwise, if the address spec type requires a non-empty set of targets. :return: list of (Address, Target) pairs. Implementation of `address_target_pairs_from_address_families()` which does no filtering. Generate glob patterns matching exactly all the BUILD files this address spec covers. Implementation of `make_glob_patterns()` which only allows a single base directory. An AddressSpec for a single address. Return the pair for the single target matching the single AddressFamily, or error. :raises: :class:`SingleAddress._SingleAddressResolutionError` if no targets could be found for a :class:`SingleAddress` instance. :return: list of (Address, Target) pairs with exactly one element. # There will be at most one target with a given name in a single AddressFamily. An AddressSpec representing all addresses located directly within the given directory. An AddressSpec representing all addresses located recursively under the given directory. An AddressSpec representing all addresses located recursively _above_ the given directory. Returns which of the two specs is more specific. This is useful when a target matches multiple specs, and we want to associate it with the "most specific" one, which will make the most intuitive sense to the user. # Note that if either of spec1 or spec2 is None, the other will be returned. Contains filters for the output of a AddressSpecs match. This class is separated out from `AddressSpecs` to allow for both stuctural equality of the `tags` and `exclude_patterns`, and for caching of their compiled forms using `@memoized_property` (which uses the hash of the class instance in its key, and results in a very large key when used with `AddressSpecs` directly). :param Address address: An Address to match :param HydratedTarget target: The Target for the address. :return: True if the given Address/HydratedTarget are included by this matcher. A collection of `AddressSpec`s representing AddressSpec subclasses, and a AddressSpecsMatcher to filter results. A literal file name, e.g. `foo.py`. A spec with a glob or globs, e.g. `*.py` and `**/*.java`. A spec to ignore certain files or globs. # We error on unmatched globs for consistency with unmatched address specs. This also # ensures that scripts don't silently do the wrong thing. # We validate that _every_ glob is valid. 
Generate PathGlobs for the specific spec, automatically including the instance's FilesystemIgnoreSpecs. Generate a single PathGlobs for the instance. Return whichever types of specs was provided by the user. It is guaranteed that there will only ever be AddressSpecs or FilesystemSpecs, but not both, through validation in the constructor. | 2.243697 | 2 |
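Each AddressSpec subclass in the row above renders back to a distinct command-line selector. A small sketch of that round trip, assuming the pants source tree from the row is importable:

from pants.base.specs import (
    SingleAddress, SiblingAddresses, DescendantAddresses, AscendantAddresses)

print(SingleAddress("src/python/foo", "bin").to_spec_string())   # src/python/foo:bin
print(SiblingAddresses("src/python/foo").to_spec_string())       # src/python/foo:
print(DescendantAddresses("src/python/foo").to_spec_string())    # src/python/foo::
print(AscendantAddresses("src/python/foo").to_spec_string())     # src/python/foo^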
Mock/MockRequesterMixin.py | GordiigPinny/ApiRequesters | 0 | 9450 | import json
import requests
from enum import Enum
from typing import Dict
from ..exceptions import JsonDecodeError, UnexpectedResponse, RequestError, BaseApiRequestError
class MockRequesterMixin:
"""
A set of methods for requester mocks
"""
class ERRORS(Enum):
ERROR_TOKEN = 'error'
BAD_CODE_400_TOKEN = 'bad<PASSWORD>00'
BAD_CODE_401_TOKEN = '<PASSWORD>'
BAD_CODE_403_TOKEN = '<PASSWORD>'
BAD_CODE_404_TOKEN = '<PASSWORD>'
class ERRORS_KEYS(Enum):
AUTH = 'auth_error'
APP_AUTH = 'app_auth_error'
USERS = 'users_error'
AWARDS = 'awards_error'
PLACES = 'places_error'
STATS = 'stats_error'
MEDIA = 'media_error'
class ROLES(Enum):
ANON = 'anon'
USER = 'user'
MODERATOR = 'moderator'
SUPERUSER = 'superuser'
@classmethod
def get_all_roles_tuple(cls):
return tuple([x.value for x in cls.ROLES])
@classmethod
def get_all_registered_roles_tuple(cls):
all_roles = list(cls.get_all_roles_tuple())
all_roles.remove(cls.ROLES.ANON.value)
return tuple(all_roles)
@classmethod
def get_all_errors_tuple(cls):
return tuple([x.value for x in cls.ERRORS])
def get_token_dict(self, token: str) -> Dict[str, str]:
return json.loads(token)
def get_role_part(self, token: str) -> str:
return self.get_token_dict(token)['role']
def get_auth_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.AUTH.value]
def get_app_auth_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.APP_AUTH.value]
def get_awards_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.AWARDS.value]
def get_places_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.PLACES.value]
def get_users_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.USERS.value]
def get_stats_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.STATS.value]
def get_media_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.MEDIA.value]
# Этот метод оверрайдить во всех классах-моках для выборки нужной ошибки из токена
def get_mine_error_part(self, token):
raise NotImplementedError
# Этот метод оверрайдить во всех классах-моках для отправки джосн-ответа
def get_object_on_success(self, token=None):
raise NotImplementedError
# Этот оверрайдить, если дсоны на GET/POST отличаются
def get_list_object_on_success(self, token=None):
return self.get_object_on_success(token)
def get_coded_response(self, code: int) -> requests.Response:
resp = requests.Response()
resp.status_code = code
return resp
def raise_coded_error(self, code: int):
resp = self.get_coded_response(code)
raise UnexpectedResponse(resp)
def _handle_errors(self, token):
"""
Обработка ошибок, переданных в с токеном
"""
token = self.get_mine_error_part(token)
if token == self.ERRORS.ERROR_TOKEN.value:
raise BaseApiRequestError()
elif token == self.ERRORS.BAD_CODE_400_TOKEN.value:
self.raise_coded_error(400)
elif token == self.ERRORS.BAD_CODE_401_TOKEN.value:
self.raise_coded_error(401)
elif token == self.ERRORS.BAD_CODE_403_TOKEN.value:
self.raise_coded_error(403)
elif token == self.ERRORS.BAD_CODE_404_TOKEN.value:
self.raise_coded_error(404)
def _mock_token_handler(self, token: str, list_object=False):
"""
Базовый метод обработки моковых токенов
"""
self._handle_errors(token)
if list_object:
return requests.Response(), self.get_list_object_on_success(token)
else:
return requests.Response(), self.get_object_on_success(token)
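# --- Illustrative sketch only (not part of this package) ---
# A concrete mock is expected to subclass the mixin and override the two hooks
# below; the class name and payload here are assumptions made for the example.
class MockAwardsRequester(MockRequesterMixin):
    def get_mine_error_part(self, token):
        # this mock reacts to the 'awards_error' flag carried in the JSON token
        return self.get_awards_error_part(token)

    def get_object_on_success(self, token=None):
        # fixed payload returned when the token carries no error
        return {'id': 1, 'name': 'First award'}

    def get(self, token):
        # delegate token parsing and error simulation to the mixin
        return self._mock_token_handler(token, list_object=False)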
| import json
import requests
from enum import Enum
from typing import Dict
from ..exceptions import JsonDecodeError, UnexpectedResponse, RequestError, BaseApiRequestError
class MockRequesterMixin:
"""
Набор методов для моков реквестеров
"""
class ERRORS(Enum):
ERROR_TOKEN = 'error'
BAD_CODE_400_TOKEN = 'bad<PASSWORD>00'
BAD_CODE_401_TOKEN = '<PASSWORD>'
BAD_CODE_403_TOKEN = '<PASSWORD>'
BAD_CODE_404_TOKEN = '<PASSWORD>'
class ERRORS_KEYS(Enum):
AUTH = 'auth_error'
APP_AUTH = 'app_auth_error'
USERS = 'users_error'
AWARDS = 'awards_error'
PLACES = 'places_error'
STATS = 'stats_error'
MEDIA = 'media_error'
class ROLES(Enum):
ANON = 'anon'
USER = 'user'
MODERATOR = 'moderator'
SUPERUSER = 'superuser'
@classmethod
def get_all_roles_tuple(cls):
return tuple([x.value for x in cls.ROLES])
@classmethod
def get_all_registered_roles_tuple(cls):
all_roles = list(cls.get_all_roles_tuple())
all_roles.remove(cls.ROLES.ANON.value)
return tuple(all_roles)
@classmethod
def get_all_errors_tuple(cls):
return tuple([x.value for x in cls.ERRORS])
def get_token_dict(self, token: str) -> Dict[str, str]:
return json.loads(token)
def get_role_part(self, token: str) -> str:
return self.get_token_dict(token)['role']
def get_auth_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.AUTH.value]
def get_app_auth_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.APP_AUTH.value]
def get_awards_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.AWARDS.value]
def get_places_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.PLACES.value]
def get_users_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.USERS.value]
def get_stats_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.STATS.value]
def get_media_error_part(self, token: str) -> str:
return self.get_token_dict(token)[self.ERRORS_KEYS.MEDIA.value]
# Этот метод оверрайдить во всех классах-моках для выборки нужной ошибки из токена
def get_mine_error_part(self, token):
raise NotImplementedError
# Этот метод оверрайдить во всех классах-моках для отправки джосн-ответа
def get_object_on_success(self, token=None):
raise NotImplementedError
# Этот оверрайдить, если дсоны на GET/POST отличаются
def get_list_object_on_success(self, token=None):
return self.get_object_on_success(token)
def get_coded_response(self, code: int) -> requests.Response:
resp = requests.Response()
resp.status_code = code
return resp
def raise_coded_error(self, code: int):
resp = self.get_coded_response(code)
raise UnexpectedResponse(resp)
def _handle_errors(self, token):
"""
Обработка ошибок, переданных в с токеном
"""
token = self.get_mine_error_part(token)
if token == self.ERRORS.ERROR_TOKEN.value:
raise BaseApiRequestError()
elif token == self.ERRORS.BAD_CODE_400_TOKEN.value:
self.raise_coded_error(400)
elif token == self.ERRORS.BAD_CODE_401_TOKEN.value:
self.raise_coded_error(401)
elif token == self.ERRORS.BAD_CODE_403_TOKEN.value:
self.raise_coded_error(403)
elif token == self.ERRORS.BAD_CODE_404_TOKEN.value:
self.raise_coded_error(404)
def _mock_token_handler(self, token: str, list_object=False):
"""
Базовый метод обработки моковых токенов
"""
self._handle_errors(token)
if list_object:
return requests.Response(), self.get_list_object_on_success(token)
else:
return requests.Response(), self.get_object_on_success(token)
| ru | 0.997452 | Набор методов для моков реквестеров # Этот метод оверрайдить во всех классах-моках для выборки нужной ошибки из токена # Этот метод оверрайдить во всех классах-моках для отправки джосн-ответа # Этот оверрайдить, если дсоны на GET/POST отличаются Обработка ошибок, переданных в с токеном Базовый метод обработки моковых токенов | 2.496151 | 2 |
tests/test_parse.py | vkleen/skidl | 700 | 9451 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import netlist_to_skidl
from .setup_teardown import get_filename, setup_function, teardown_function
def test_parser_1():
netlist_to_skidl(get_filename("Arduino_Uno_R3_From_Scratch.net"))
| # -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import netlist_to_skidl
from .setup_teardown import get_filename, setup_function, teardown_function
def test_parser_1():
netlist_to_skidl(get_filename("Arduino_Uno_R3_From_Scratch.net")) | en | 0.675338 | # -*- coding: utf-8 -*- # The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>. | 1.975225 | 2 |
Projects/envirohat-monitor/clear-screen.py | pkbullock/RaspberryPi | 0 | 9452 | #!/usr/bin/env python3
import ST7735
import sys
st7735 = ST7735.ST7735(
port=0,
cs=1,
dc=9,
backlight=12,
rotation=270,
spi_speed_hz=10000000
)
# Reset the display
st7735.begin()
st7735.reset()
st7735.set_backlight(0)
print("\nDone.")
# Exit cleanly
sys.exit(0) | #!/usr/bin/env python3
import ST7735
import sys
st7735 = ST7735.ST7735(
port=0,
cs=1,
dc=9,
backlight=12,
rotation=270,
spi_speed_hz=10000000
)
# Reset the display
st7735.begin()
st7735.reset()
st7735.set_backlight(0)
print("\nDone.")
# Exit cleanly
sys.exit(0) | en | 0.282549 | #!/usr/bin/env python3 # Reset the display # Exit cleanly | 2.342625 | 2 |
Scripts/nominatintest.py | carlosdenner/business_atlas | 0 | 9453 | <reponame>carlosdenner/business_atlas
from geopy.geocoders import Nominatim
from requests.models import LocationParseError
geolocator = Nominatim(user_agent="geoapiExercises")
Latitude = 25.594095
Longitude = 85.137566
def location(Latitude, Longitude):
lat = str(Latitude)
long = str(Longitude)
print(lat + long)
local = lat + "," + long
print(local)
if(len(local) > 3):
location = geolocator.reverse(local)
locStr = str(location)
print(locStr)
splitted = locStr.split(',')
country = splitted[len(splitted) - 1]
print(country)
print("==============país==============")
return country
else:
return ""
location(Latitude, Longitude)
# Display
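# Hypothetical variant (not in the original script): rather than splitting the
# display string on commas, read the structured payload geopy exposes through
# Location.raw. This assumes Nominatim returns address details for the point.
def country_from_raw(lat, lon):
    loc = geolocator.reverse(f"{lat},{lon}", language="en")
    if loc is None:
        return ""
    return loc.raw.get("address", {}).get("country", "")

# print(country_from_raw(Latitude, Longitude))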
| from geopy.geocoders import Nominatim
from requests.models import LocationParseError
geolocator = Nominatim(user_agent="geoapiExercises")
Latitude = 25.594095
Longitude = 85.137566
def location(Latitude, Longitude):
lat = str(Latitude)
long = str(Longitude)
print(lat + long)
local = lat + "," + long
print(local)
if(len(local) > 3):
location = geolocator.reverse(local)
locStr = str(location)
print(locStr)
splitted = locStr.split(',')
country = splitted[len(splitted) - 1]
print(country)
print("==============país==============")
return country
else:
return ""
location(Latitude, Longitude)
# Display | none | 1 | 3.111395 | 3 |
|
gamesystem.py | cristilianojr/JOKENPOH | 1 | 9454 | <reponame>cristilianojr/JOKENPOH
import random
from tkinter import PhotoImage
"""
Esse arquivo define os estados do game
"""
def ia_chocer():
"""IA faz a escolha de um numero aleatório"""
posibility = ['rock', 'paper', 'scissor']
value = posibility[random.randint(0, 2)]
return value
def battle_verification(player_choice, ia_choice):
state_victoryorlose = ''
if player_choice == 'rock':
if ia_choice == 'rock':
state_victoryorlose = 'draw'
elif ia_choice == 'scissor':
state_victoryorlose = 'victory'
elif ia_choice == 'paper':
state_victoryorlose = 'defeat'
elif player_choice == 'scissor':
if ia_choice == 'rock':
state_victoryorlose = 'defeat'
elif ia_choice == 'scissor':
state_victoryorlose = 'draw'
elif ia_choice == 'paper':
state_victoryorlose = 'victory'
elif player_choice == 'paper':
if ia_choice == 'rock':
state_victoryorlose = 'victory'
elif ia_choice == 'scissor':
state_victoryorlose = 'defeat'
elif ia_choice == 'paper':
state_victoryorlose = 'draw'
return state_victoryorlose
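# Illustrative alternative (not part of the original file): the same rules as a
# lookup table, where each key maps to the choice it defeats, so the if/elif
# chain above collapses into a single dictionary comparison.
WINS_AGAINST = {'rock': 'scissor', 'scissor': 'paper', 'paper': 'rock'}

def battle_verification_table(player_choice, ia_choice):
    if player_choice == ia_choice:
        return 'draw'
    return 'victory' if WINS_AGAINST[player_choice] == ia_choice else 'defeat'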
| import random
from tkinter import PhotoImage
"""
Esse arquivo define os estados do game
"""
def ia_chocer():
"""IA faz a escolha de um numero aleatório"""
posibility = ['rock', 'paper', 'scissor']
value = posibility[random.randint(0, 2)]
return value
def battle_verification(player_choice, ia_choice):
state_victoryorlose = ''
if player_choice == 'rock':
if ia_choice == 'rock':
state_victoryorlose = 'draw'
elif ia_choice == 'scissor':
state_victoryorlose = 'victory'
elif ia_choice == 'paper':
state_victoryorlose = 'defeat'
elif player_choice == 'scissor':
if ia_choice == 'rock':
state_victoryorlose = 'defeat'
elif ia_choice == 'scissor':
state_victoryorlose = 'draw'
elif ia_choice == 'paper':
state_victoryorlose = 'victory'
elif player_choice == 'paper':
if ia_choice == 'rock':
state_victoryorlose = 'victory'
elif ia_choice == 'scissor':
state_victoryorlose = 'defeat'
elif ia_choice == 'paper':
state_victoryorlose = 'draw'
return state_victoryorlose | pt | 0.994861 | Esse arquivo define os estados do game IA faz a escolha de um numero aleatório | 3.385682 | 3 |
train/filelocks.py | mister-bailey/MagNET | 0 | 9455 | from filelock import FileLock, Timeout
import os
import time
class ProcessFileLock(FileLock):
"""
FileLock that is unique per path in each process (for, eg., reentrance)
"""
locks = {}
def __new__(cls, path, *args, **kwargs):
if path in ProcessFileLock.locks:
return ProcessFileLock.locks[path]
else:
lock = super().__new__(cls, path, *args, **kwargs)
lock.__new_init__(path, *args, **kwargs)
ProcessFileLock.locks[path] = lock
return lock
def __new_init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __init__(self, *args, **kwargs):
pass
class ExplosiveFileLock(ProcessFileLock):
def acquire(self, *args, **kwargs):
r = super().acquire(*args, **kwargs)
if self._lock_counter > 1:
raise BlockingIOError(f"Process attempted to reacquire lock for {self._lock_file}")
return r
class HistoriesLock(FileLock):
def __init__(self, dir, ensemble=None):
super().__init__(os.path.join(dir, "histories.lock"))
self.ensemble = ensemble
def release(self, **kwargs):
super().release()
if self.ensemble and self._lock_counter == 0:
self.ensemble.close_histories()
class SamplesLock(FileLock):
def __init__(self, dir, ensemble=None):
super().__init__(os.path.join(dir, "samples.lock"))
self.ensemble = ensemble
def release(self, **kwargs):
if self.ensemble and self._lock_counter == 1:
self.ensemble._test_samples.close()
self.ensemble._test_samples = None
super().release()
def __enter__(self):
print("Acquiring samples lock... ", end='')
super().__enter__()
if self.ensemble._test_samples is None:
from sample_hyperparameters import TrainableSampleGenerator
self.ensemble._test_samples = TrainableSampleGenerator(self.ensemble.config.exploration.sample_file, configs=self.ensemble.config_files, stub=self.ensemble.stub)
print("Done.")
return self.ensemble._test_samples
class ExistLock:
"""
Locks on the existence of the given file.
No guarantees of atomicity!
Unique per process, for reentry
"""
locks={}
def __new__(cls, path, *args, **kwargs):
if path in ExistLock.locks:
lock = ExistLock.locks[path]
#print(f"Reloading ExistLock('{path}')")
#print(f" Lock counter = {lock._lock_counter}")
return lock
else:
#print(f"Creating new ExistLock('{path}')")
lock = super().__new__(cls)
lock.__new_init__(path, *args, **kwargs)
ExistLock.locks[path] = lock
return lock
def __new_init__(self, path, block=True, timeout=None, polling_interval=.05):
self.path = path
        if not block:
            timeout = 0.0
        self.timeout = timeout
self.polling_interval=polling_interval
self._lock_counter = 0
def acquire(self, block=None, timeout=None):
"""
Not atomic. Should probably happen within the context of an
atomic lock.
"""
if block == False:
timeout = 0.0
if timeout is None:
timeout = self.timeout
#print(f"Trying to acquire ExistLock('{self.path}')...")
#print(f" Lock counter = {self._lock_counter}")
start_time = time.time()
while os.path.isfile(self.path):
if self._lock_counter > 0:
self._lock_counter += 1
#print(f"Acquired, lock counter = {self._lock_counter}")
return True
if timeout is None or time.time() - start_time < timeout:
time.sleep(self.polling_interval)
else:
return False
with open(self.path, 'w'):
self._lock_counter = 1
#print(f"Acquired, lock counter = {self._lock_counter}")
return True
def release(self):
        self._lock_counter = max(0, self._lock_counter - 1)
if self._lock_counter == 0 and os.path.isfile(self.path):
os.remove(self.path)
def __enter__(self):
if self.acquire():
return self
else:
raise Timeout(f"Failed to acquire ExistLock for file {self.path}")
def __exit__(self, type, value, traceback):
self.release()
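# Minimal usage sketch (illustrative, not part of the module); the lock path is
# an assumption. ProcessFileLock hands back the same object for the same path
# within a process, so nested `with` blocks on one path re-enter rather than
# self-deadlock.
if __name__ == "__main__":
    with ProcessFileLock("/tmp/example.lock"):
        with ProcessFileLock("/tmp/example.lock"):  # same instance, re-entrant
            print("holding the lock twice in the same process")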
| from filelock import FileLock, Timeout
import os
import time
class ProcessFileLock(FileLock):
"""
FileLock that is unique per path in each process (for, eg., reentrance)
"""
locks = {}
def __new__(cls, path, *args, **kwargs):
if path in ProcessFileLock.locks:
return ProcessFileLock.locks[path]
else:
lock = super().__new__(cls, path, *args, **kwargs)
lock.__new_init__(path, *args, **kwargs)
ProcessFileLock.locks[path] = lock
return lock
def __new_init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __init__(self, *args, **kwargs):
pass
class ExplosiveFileLock(ProcessFileLock):
def acquire(self, *args, **kwargs):
r = super().acquire(*args, **kwargs)
if self._lock_counter > 1:
raise BlockingIOError(f"Process attempted to reacquire lock for {self._lock_file}")
return r
class HistoriesLock(FileLock):
def __init__(self, dir, ensemble=None):
super().__init__(os.path.join(dir, "histories.lock"))
self.ensemble = ensemble
def release(self, **kwargs):
super().release()
if self.ensemble and self._lock_counter == 0:
self.ensemble.close_histories()
class SamplesLock(FileLock):
def __init__(self, dir, ensemble=None):
super().__init__(os.path.join(dir, "samples.lock"))
self.ensemble = ensemble
def release(self, **kwargs):
if self.ensemble and self._lock_counter == 1:
self.ensemble._test_samples.close()
self.ensemble._test_samples = None
super().release()
def __enter__(self):
print("Acquiring samples lock... ", end='')
super().__enter__()
if self.ensemble._test_samples is None:
from sample_hyperparameters import TrainableSampleGenerator
self.ensemble._test_samples = TrainableSampleGenerator(self.ensemble.config.exploration.sample_file, configs=self.ensemble.config_files, stub=self.ensemble.stub)
print("Done.")
return self.ensemble._test_samples
class ExistLock:
"""
Locks on the existence of the given file.
No guarantees of atomicity!
Unique per process, for reentry
"""
locks={}
def __new__(cls, path, *args, **kwargs):
if path in ExistLock.locks:
lock = ExistLock.locks[path]
#print(f"Reloading ExistLock('{path}')")
#print(f" Lock counter = {lock._lock_counter}")
return lock
else:
#print(f"Creating new ExistLock('{path}')")
lock = super().__new__(cls)
lock.__new_init__(path, *args, **kwargs)
ExistLock.locks[path] = lock
return lock
def __new_init__(self, path, block=True, timeout=None, polling_interval=.05):
self.path = path
        if not block:
            timeout = 0.0
        self.timeout = timeout
self.polling_interval=polling_interval
self._lock_counter = 0
def acquire(self, block=None, timeout=None):
"""
Not atomic. Should probably happen within the context of an
atomic lock.
"""
if block == False:
timeout = 0.0
if timeout is None:
timeout = self.timeout
#print(f"Trying to acquire ExistLock('{self.path}')...")
#print(f" Lock counter = {self._lock_counter}")
start_time = time.time()
while os.path.isfile(self.path):
if self._lock_counter > 0:
self._lock_counter += 1
#print(f"Acquired, lock counter = {self._lock_counter}")
return True
if timeout is None or time.time() - start_time < timeout:
time.sleep(self.polling_interval)
else:
return False
with open(self.path, 'w'):
self._lock_counter = 1
#print(f"Acquired, lock counter = {self._lock_counter}")
return True
def release(self):
        self._lock_counter = max(0, self._lock_counter - 1)
if self._lock_counter == 0 and os.path.isfile(self.path):
os.remove(self.path)
def __enter__(self):
if self.acquire():
return self
else:
raise Timeout(f"Failed to acquire ExistLock for file {self.path}")
def __exit__(self, type, value, traceback):
self.release()
| en | 0.654314 | FileLock that is unique per path in each process (for, eg., reentrance) Locks on the existence of the given file.
No guarantees of atomicity!
Unique per process, for reentry #print(f"Reloading ExistLock('{path}')") #print(f" Lock counter = {lock._lock_counter}") #print(f"Creating new ExistLock('{path}')") Not atomic. Should probably happen within the context of an
atomic lock. #print(f"Trying to acquire ExistLock('{self.path}')...") #print(f" Lock counter = {self._lock_counter}") #print(f"Acquired, lock counter = {self._lock_counter}") #print(f"Acquired, lock counter = {self._lock_counter}") | 3.000469 | 3 |
python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitClassInPy36_after.py | jnthn/intellij-community | 2 | 9456 | class A_NEW_NAME(object):
pass | class A_NEW_NAME(object):
pass | none | 1 | 1.257428 | 1 |
|
speedcom/tests/__init__.py | emissible/emissilbe | 1 | 9457 | <reponame>emissible/emissilbe
#from . import context
#from . import test_NNModels
#from . import test_data_extract
#from . import test_speedcom
#from . import test_utilities
| #from . import context
#from . import test_NNModels
#from . import test_data_extract
#from . import test_speedcom
#from . import test_utilities | en | 0.178629 | #from . import context #from . import test_NNModels #from . import test_data_extract #from . import test_speedcom #from . import test_utilities | 1.022959 | 1 |
todo/management/serializers/tasks.py | Sanguet/todo-challenge | 0 | 9458 | # Django REST Framework
from rest_framework import serializers
# Model
from todo.management.models import Task
# Utils
from todo.utils.tasks import TaskMetrics
from todo.utils.serializer_fields import CompleteNameUser
class TaskModelSerializer(serializers.ModelSerializer):
"""Modelo serializer del circulo"""
user = CompleteNameUser(many=False)
class Meta:
"""Meta class"""
model = Task
fields = (
'id', 'user', 'title',
'date_to_finish', 'is_finalize',
'description', 'created',
'priority', 'color'
)
read_only_fields = (
'id', 'user',
'created',
)
def create(self, data):
"""Creacion de la tarea"""
# Sacamos los datos que ya tenemos en el context
user = self.context['request'].user
data['is_finalize'] = False
# Creamos la tarea
task = Task.objects.create(
user=user,
**data
)
# Puntos al perfil
TaskMetrics(action='Create', user=user)
return task
def update(self, instance, data):
"""Actualizacion de la tarea"""
# Extraemos el user del contexto y mandamos la funcion update
user = self.context['request'].user
new_is_finalize = data.get('is_finalize', instance.is_finalize)
if new_is_finalize != instance.is_finalize:
TaskMetrics(action='Update', user=user, is_finalize=new_is_finalize)
# Actualizamos los datos normales
super(TaskModelSerializer, self).update(instance, data)
return instance
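# Sketch of intended usage (illustrative only; the factory and user names are
# assumptions, not project code). The serializer reads request.user from its
# context, so the request must be passed in when it is constructed:
#
#   from rest_framework.test import APIRequestFactory
#   request = APIRequestFactory().post('/tasks/')
#   request.user = some_user  # hypothetical authenticated user
#   serializer = TaskModelSerializer(data=payload, context={'request': request})
#   if serializer.is_valid():
#       serializer.save()  # calls create(), which also records TaskMetrics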
| # Django REST Framework
from rest_framework import serializers
# Model
from todo.management.models import Task
# Utils
from todo.utils.tasks import TaskMetrics
from todo.utils.serializer_fields import CompleteNameUser
class TaskModelSerializer(serializers.ModelSerializer):
"""Modelo serializer del circulo"""
user = CompleteNameUser(many=False)
class Meta:
"""Meta class"""
model = Task
fields = (
'id', 'user', 'title',
'date_to_finish', 'is_finalize',
'description', 'created',
'priority', 'color'
)
read_only_fields = (
'id', 'user',
'created',
)
def create(self, data):
"""Creacion de la tarea"""
# Sacamos los datos que ya tenemos en el context
user = self.context['request'].user
data['is_finalize'] = False
# Creamos la tarea
task = Task.objects.create(
user=user,
**data
)
# Puntos al perfil
TaskMetrics(action='Create', user=user)
return task
def update(self, instance, data):
"""Actualizacion de la tarea"""
# Extraemos el user del contexto y mandamos la funcion update
user = self.context['request'].user
new_is_finalize = data.get('is_finalize', instance.is_finalize)
if new_is_finalize != instance.is_finalize:
TaskMetrics(action='Update', user=user, is_finalize=new_is_finalize)
# Actualizamos los datos normales
super(TaskModelSerializer, self).update(instance, data)
return instance
| es | 0.836195 | # Django REST Framework # Model # Utils Modelo serializer del circulo Meta class Creacion de la tarea # Sacamos los datos que ya tenemos en el context # Creamos la tarea # Puntos al perfil Actualizacion de la tarea # Extraemos el user del contexto y mandamos la funcion update # Actualizamos los datos normales | 2.237841 | 2 |
outlier_detector.py | Sean-Ker/data_homework | 0 | 9459 | <gh_stars>0
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
'''
A function that detects outliers, where k is a standard deviation threshold hyperparameter, preferably (2, 2.5, 3).
The algo can handle multivariate data frames with any number of features d.
It first decorrelates the features with PCA, checks that the covariance matrix and its inverse are positive definite, and computes the Mahalanobis distance of each row against a threshold of k times the mean distance.
Returns the subset of rows flagged as outliers.
'''
def outlier_detector(data, k=2.5):
# Calculate Principal Component Analysis
pca = PCA(n_components=data.shape[1], svd_solver='full')
df = pd.DataFrame(pca.fit_transform(
data), index=data.index, columns=data.columns)
# Calculate covariance and its inverse matrices
cov_matrix = np.cov(df.values, rowvar=False)
inv_cov = np.linalg.inv(cov_matrix)
mean = df.values.mean(axis=0)
# Check matrices are positive definite: https://en.wikipedia.org/wiki/Definiteness_of_a_matrix
assert is_pos_def(cov_matrix) and is_pos_def(inv_cov)
# Calculate Mahalanobis Distance https://en.wikipedia.org/wiki/Mahalanobis_distance
md = mahalanobis_dist(inv_cov, mean, df.values, verbose=False)
threshold = np.mean(md) * k
# res = pd.DataFrame(index=data.index,columns=data.columns)
return data[md > threshold]
# https://www.youtube.com/watch?v=spNpfmWZBmg&t=0s
def mahalanobis_dist(inv_cov_matrix, mean_distr, data, verbose=False):
diff = data - mean_distr
md = []
for i in range(len(diff)):
md.append(np.sqrt(diff[i].dot(inv_cov_matrix).dot(diff[i])))
return np.array(md)
# Check that matrix is positive definite
def is_pos_def(A):
if np.allclose(A, A.T):
try:
np.linalg.cholesky(A)
return True
except np.linalg.LinAlgError:
return False
else:
return False
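# Small usage sketch (illustrative, not part of the original file): a toy
# two-feature frame with one injected extreme point, which the detector is
# expected to flag with k=2.5.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    frame = pd.DataFrame(rng.normal(0, 1, size=(100, 2)), columns=["x", "y"])
    frame.loc[100] = [10.0, -10.0]  # obvious outlier
    print(outlier_detector(frame, k=2.5))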
| import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
'''
A function that detects outliers, where k is a standard deviation threshold hyperparameter, preferably (2, 2.5, 3).
The algo can handle multivariate data frames with any number of features d.
It first decorrelates the features with PCA, checks that the covariance matrix and its inverse are positive definite, and computes the Mahalanobis distance of each row against a threshold of k times the mean distance.
Returns the subset of rows flagged as outliers.
'''
def outlier_detector(data, k=2.5):
# Calculate Principal Component Analysis
pca = PCA(n_components=data.shape[1], svd_solver='full')
df = pd.DataFrame(pca.fit_transform(
data), index=data.index, columns=data.columns)
# Calculate covariance and its inverse matrices
cov_matrix = np.cov(df.values, rowvar=False)
inv_cov = np.linalg.inv(cov_matrix)
mean = df.values.mean(axis=0)
# Check matrices are positive definite: https://en.wikipedia.org/wiki/Definiteness_of_a_matrix
assert is_pos_def(cov_matrix) and is_pos_def(inv_cov)
# Calculate Mahalanobis Distance https://en.wikipedia.org/wiki/Mahalanobis_distance
md = mahalanobis_dist(inv_cov, mean, df.values, verbose=False)
threshold = np.mean(md) * k
# res = pd.DataFrame(index=data.index,columns=data.columns)
return data[md > threshold]
# https://www.youtube.com/watch?v=spNpfmWZBmg&t=0s
def mahalanobis_dist(inv_cov_matrix, mean_distr, data, verbose=False):
diff = data - mean_distr
md = []
for i in range(len(diff)):
md.append(np.sqrt(diff[i].dot(inv_cov_matrix).dot(diff[i])))
return np.array(md)
# Check that matrix is positive definite
def is_pos_def(A):
if np.allclose(A, A.T):
try:
np.linalg.cholesky(A)
return True
except np.linalg.LinAlgError:
return False
else:
return False | en | 0.732611 | A function that detects outliers, where k is a tandard deviation threshold hyperparameter preferablly (2, 2.5, 3). The algo could handle multivariable data frames with any number of features d. For that manner, it first reduces the dimensionality to 2 using PCA, makes sure that the matrix is positive definite and calculates the Mahalanobis Distance with a threshold value. Returns a series of n rows back. # Calculate Principal Component Analysis # Calculate covariance and its inverse matrices # Check matrices are positive definite: https://en.wikipedia.org/wiki/Definiteness_of_a_matrix # Calculate Mahalanobis Distance https://en.wikipedia.org/wiki/Mahalanobis_distance # res = pd.DataFrame(index=data.index,columns=data.columns) # https://www.youtube.com/watch?v=spNpfmWZBmg&t=0s # Check that matrix is positive definite | 3.749184 | 4 |
arc113/b.py | nishio/atcoder | 1 | 9460 | <filename>arc113/b.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(SOLVE_PARAMS):
pass
def main():
A, B, C = map(int, input().split())
doubling = [B % 20]
for i in range(32):
doubling.append(
(doubling[-1] ** 2) % 20
)
BC = 1
for i in range(32):
if C % 2:
BC *= doubling[i]
BC %= 20
C //= 2
if BC == 0:
BC = 20
ret = (A % 10) ** BC
ret %= 10
print(ret)
# tests
T1 = """
4 3 2
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
4
"""
T2 = """
1 2 3
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
1
"""
T3 = """
3141592 6535897 9323846
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
2
"""
T4 = """
2 10 1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
4
"""
T5 = """
2 20 1
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
6
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
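# Sanity-check sketch (illustrative, not part of the submission): for inputs
# small enough to evaluate B**C directly, the mod-20 exponent trick in main()
# should agree with Python's built-in three-argument pow, e.g.
# pow(4, 3 ** 2, 10) == 4 and pow(2, 20 ** 1, 10) == 6 match tests T1 and T5.
def brute_force_answer(A, B, C):
    return pow(A, B ** C, 10)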
| <filename>arc113/b.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(SOLVE_PARAMS):
pass
def main():
A, B, C = map(int, input().split())
doubling = [B % 20]
for i in range(32):
doubling.append(
(doubling[-1] ** 2) % 20
)
BC = 1
for i in range(32):
if C % 2:
BC *= doubling[i]
BC %= 20
C //= 2
if BC == 0:
BC = 20
ret = (A % 10) ** BC
ret %= 10
print(ret)
# tests
T1 = """
4 3 2
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
4
"""
T2 = """
1 2 3
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
1
"""
T3 = """
3141592 6535897 9323846
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
2
"""
T4 = """
2 10 1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
4
"""
T5 = """
2 20 1
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
6
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| en | 0.466126 | # included from snippets/main.py # tests 4 3 2 >>> as_input(T1) >>> main() 4 1 2 3 >>> as_input(T2) >>> main() 1 3141592 6535897 9323846 >>> as_input(T3) >>> main() 2 2 10 1 >>> as_input(T4) >>> main() 4 2 20 1 >>> as_input(T5) >>> main() 6 # end of snippets/main.py | 3.062367 | 3 |
pythonG/objects.py | ezan2000/Cssi_2018 | 0 | 9461 | ezan = {
'name': 'ezan',
'age': 18,
'hair': 'brown',
'cool': True ,
}
print(ezan)
class Person(object): #use class to make object
    def __init__(
            self, name, age, hair, cool, hungry):  # initialize
        # first object inside of a class is self
        self.name = name
        self.age = age
        self.hair = hair
        self.cool = cool
        self.hungry = hungry
def eat(self,food):
print("EAT {f}".format(f = food))
self.hungry = food
def play(self, game):
print("Play {p}".format(p = game))
self.play = game
def birth(self,person):
        kids = Person(name = " lail", age = 18, hair = 'black', cool = True, hungry = True)
ezan = Person( name = "ezan", age = 18, hair = "black", cool = True, hungry = False)
print(ezan.name)
print('I am hungry')
Austin = Person(name = 'austin', age = 18, hair = "Shrek", cool = False, hungry = True)
| ezan = {
'name': 'ezan',
'age': 18,
'hair': 'brown',
'cool': True ,
}
print(ezan)
class Person(object): #use class to make object
    def __init__(
            self, name, age, hair, cool, hungry):  # initialize
        # first object inside of a class is self
        self.name = name
        self.age = age
        self.hair = hair
        self.cool = cool
        self.hungry = hungry
def eat(self,food):
print("EAT {f}".format(f = food))
self.hungry = food
def play(self, game):
print("Play {p}".format(p = game))
self.play = game
def birth(self,person):
        kids = Person(name = " lail", age = 18, hair = 'black', cool = True, hungry = True)
ezan = Person( name = "ezan", age = 18, hair = "black", cool = True, hungry = False)
print(ezan.name)
print('I am hungry')
Austin = Person(name = 'austin', age = 18, hair = "Shrek", cool = False, hungry = True)
| en | 0.598228 | #use class to make object #initialize #first object inside of a class is self | 4.119486 | 4 |
62/main.py | pauvrepetit/leetcode | 0 | 9462 | <gh_stars>0
# 62. 不同路径
# 组合数,杨辉三角
yanghui = [[0 for i in range(202)] for j in range(202)]
def comb(n, k):
if yanghui[n][k] == 0:
yanghui[n][k] = (comb(n-1, k-1) + comb(n-1, k))
return yanghui[n][k]
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
for i in range(202):
yanghui[i][0] = 1
yanghui[i][i] = 1
return comb(m+n-2, min(m, n)-1) | # 62. 不同路径
# 组合数,杨辉三角
yanghui = [[0 for i in range(202)] for j in range(202)]
def comb(n, k):
if yanghui[n][k] == 0:
yanghui[n][k] = (comb(n-1, k-1) + comb(n-1, k))
return yanghui[n][k]
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
for i in range(202):
yanghui[i][0] = 1
yanghui[i][i] = 1
return comb(m+n-2, min(m, n)-1) | zh | 0.950997 | # 62. 不同路径 # 组合数,杨辉三角 | 3.265578 | 3 |
GermanOK/run.py | romainledru/GermanOK | 0 | 9463 | <filename>GermanOK/run.py
from Pages import *
app = App()
app.mainloop()
| <filename>GermanOK/run.py
from Pages import *
app = App()
app.mainloop()
| none | 1 | 1.258839 | 1 |
|
cauldron/cli/server/routes/ui_statuses.py | JohnnyPeng18/cauldron | 90 | 9464 | <filename>cauldron/cli/server/routes/ui_statuses.py
import flask
from cauldron.cli.server import run as server_runner
from cauldron.ui import arguments
from cauldron.ui import statuses
@server_runner.APPLICATION.route('/ui-status', methods=['POST'])
def ui_status():
args = arguments.from_request()
last_timestamp = args.get('last_timestamp', 0)
force = args.get('force', False)
results = statuses.get_status(last_timestamp, force)
return flask.jsonify(results)
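# Hypothetical client-side sketch (not part of cauldron); the host, port and the
# JSON encoding of the body are assumptions. The route above expects a POST that
# carries the last_timestamp/force arguments read via arguments.from_request():
#
#   import requests
#   requests.post('http://localhost:5010/ui-status',
#                 json={'last_timestamp': 0, 'force': True})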
| <filename>cauldron/cli/server/routes/ui_statuses.py
import flask
from cauldron.cli.server import run as server_runner
from cauldron.ui import arguments
from cauldron.ui import statuses
@server_runner.APPLICATION.route('/ui-status', methods=['POST'])
def ui_status():
args = arguments.from_request()
last_timestamp = args.get('last_timestamp', 0)
force = args.get('force', False)
results = statuses.get_status(last_timestamp, force)
return flask.jsonify(results)
| none | 1 | 1.975831 | 2 |
|
google_search.py | Jaram2019/minwoo | 0 | 9465 | <filename>google_search.py
import requests
from bs4 import BeautifulSoup
import re
rq = requests.get("https://play.google.com/store/apps/category/GAME_MUSIC?hl=ko")
rqctnt = rq.content
soup = BeautifulSoup(rqctnt,"html.parser")
soup = soup.find_all(attrs={'class':'title'})
blacklsit = ["앱","영화/TV","음악","도서","기기","엔터테인먼트","음악"]
for link in soup:
if link.text.strip() in blacklsit:
pass
else:
print(link.text.strip())
| <filename>google_search.py
import requests
from bs4 import BeautifulSoup
import re
rq = requests.get("https://play.google.com/store/apps/category/GAME_MUSIC?hl=ko")
rqctnt = rq.content
soup = BeautifulSoup(rqctnt,"html.parser")
soup = soup.find_all(attrs={'class':'title'})
blacklsit = ["앱","영화/TV","음악","도서","기기","엔터테인먼트","음악"]
for link in soup:
if link.text.strip() in blacklsit:
pass
else:
print(link.text.strip())
| none | 1 | 3.016088 | 3 |
|
pygall/tests/test_photos.py | bbinet/PyGall | 1 | 9466 | <filename>pygall/tests/test_photos.py
from unittest import TestCase
from pyramid import testing
class PhotosTests(TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
| <filename>pygall/tests/test_photos.py
from unittest import TestCase
from pyramid import testing
class PhotosTests(TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
| none | 1 | 1.800436 | 2 |
|
Chapter_4/lists_data_type.py | alenasf/AutomateTheBoringStuff | 0 | 9467 | #Negative Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[-1] # elepant
spam[-3] # bat
# Getting a List from another List with Slices
spam = ['cat', 'bat', 'rat', 'elephant']
spam[0:4] # ['cat', 'bat', 'rat', 'elephant']
spam[1:3] # ['bat', 'rat']
spam[0:-1] # ['cat', 'bat', 'rat']
spam[:2] # ['cat', 'bat']
spam[1:] # ['bat', 'rat', 'elephant']
spam[:] # ['cat', 'bat', 'rat', 'elephant']
# Getting a List's length with the len() Function
spam = ['cat', 'dog', 'moose']
len(spam) # 3
# Changing Values in a List with Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[1] = 'aardvark'
spam # ['cat', 'aardvark', 'rat', 'elephant']
spam[2]=spam[1]
spam # ['cat', 'aardvark', 'aardvark', 'elephant']
spam[-1] = 12345
spam # ['cat', 'aardvark', 'aardvark', 12345]
# List Concatenation and List Replication
[1, 2, 3] + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']
['X', 'Y', 'Z'] * 3
#['X', 'Y', 'Z', 'X', 'Y', 'Z', 'X', 'Y', 'Z']
spam = [1, 2, 3]
spam = spam + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']
# Removing Values From Lists with del Statements
spam = ['cat', 'bat', 'rat', 'elephant']
del spam[2]
spam # ['cat', 'bat', 'elephant']
del spam[2]
spam # ['cat', 'bat']
# Using for Loops with Lists
for i in range(4):
print(i)
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for i in range(len(supplies)):
print('Index ' + str(i) + ' in supplies is: ' + supplies[i])
# The in and not in Operators
'howdy' in ['hello', 'hi', 'howdy', 'heyas'] # True
spam = ['hello', 'hi', 'howdy', 'heyas']
'cat' in spam # False
'howdy' not in spam # False
# Type in a pet name and then check wether the name is in a list of pets
myPets = ['Zophie', 'Pooka', 'Fat-tail']
print('Enter a pet name:')
name = input()
if name not in myPets:
print('I do not have a pet named ' + name)
else:
print(name + ' is my pet.')
# The Multiple Assignment Trick
cat = ['fat', 'gray', 'loud']
size = cat[0]
color = cat[1]
disposition = cat[2]
# type this line
cat = ['fat', 'gray', 'loud']
size, color, disposition = cat
# Using the enumerate() Function with Lists
# enumerate() Function is useful when you need both the item and item's index in loop's block
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for index, item in enumerate(supplies):
print('Index ' + str(index) + ' in supplies is: ' + item)
# Using the random.choice() and random.shuffle() Function with Lists
import random
pets = ['Dog', 'Cat', 'Moose']
random.choice(pets)
random.choice(pets)
random.choice(pets)
# random.choice(someList) to be a shorter form of someList[random.randint(0, len(someList)-1)]
import random
people = ['Alice', 'Bob', 'Carol', 'David']
random.shuffle(people)
people # ['Bob', 'Carol', 'David', 'Alice']
random.shuffle(people)
people # random list of people
#Augmented Assignment Operators
spam += 1 # spam = spam + 1
spam -= 1 # spam = spam - 1
spam *= 1 # spam = spam * 1
spam /= 1 #spam = spam / 1
spam %= 1 #spam = spam % 1
| #Negative Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[-1] # elepant
spam[-3] # bat
# Getting a List from another List with Slices
spam = ['cat', 'bat', 'rat', 'elephant']
spam[0:4] # ['cat', 'bat', 'rat', 'elephant']
spam[1:3] # ['bat', 'rat']
spam[0:-1] # ['cat', 'bat', 'rat']
spam[:2] # ['cat', 'bat']
spam[1:] # ['bat', 'rat', 'elephant']
spam[:] # ['cat', 'bat', 'rat', 'elephant']
# Getting a List's length with the len() Function
spam = ['cat', 'dog', 'moose']
len(spam) # 3
# Changing Values in a List with Indexes
spam = ['cat', 'bat', 'rat', 'elephant']
spam[1] = 'aardvark'
spam # ['cat', 'aardvark', 'rat', 'elephant']
spam[2]=spam[1]
spam # ['cat', 'aardvark', 'aardvark', 'elephant']
spam[-1] = 12345
spam # ['cat', 'aardvark', 'aardvark', 12345]
# List Concatenation and List Replication
[1, 2, 3] + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']
['X', 'Y', 'Z'] * 3
#['X', 'Y', 'Z', 'X', 'Y', 'Z', 'X', 'Y', 'Z']
spam = [1, 2, 3]
spam = spam + ['A', 'B', 'C']
# [1, 2, 3, 'A', 'B', 'C']
# Removing Values From Lists with del Statements
spam = ['cat', 'bat', 'rat', 'elephant']
del spam[2]
spam # ['cat', 'bat', 'elephant']
del spam[2]
spam # ['cat', 'bat']
# Using for Loops with Lists
for i in range(4):
print(i)
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for i in range(len(supplies)):
print('Index ' + str(i) + ' in supplies is: ' + supplies[i])
# The in and not in Operators
'howdy' in ['hello', 'hi', 'howdy', 'heyas'] # True
spam = ['hello', 'hi', 'howdy', 'heyas']
'cat' in spam # False
'howdy' not in spam # False
# Type in a pet name and then check wether the name is in a list of pets
myPets = ['Zophie', 'Pooka', 'Fat-tail']
print('Enter a pet name:')
name = input()
if name not in myPets:
print('I do not have a pet named ' + name)
else:
print(name + ' is my pet.')
# The Multiple Assignment Trick
cat = ['fat', 'gray', 'loud']
size = cat[0]
color = cat[1]
disposition = cat[2]
# type this line
cat = ['fat', 'gray', 'loud']
size, color, disposition = cat
# Using the enumerate() Function with Lists
# enumerate() Function is useful when you need both the item and item's index in loop's block
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for index, item in enumerate(supplies):
print('Index ' + str(index) + ' in supplies is: ' + item)
# Using the random.choice() and random.shuffle() Function with Lists
import random
pets = ['Dog', 'Cat', 'Moose']
random.choice(pets)
random.choice(pets)
random.choice(pets)
# random.choice(someList) to be a shorter form of someList[random.randint(0, len(someList)-1)]
import random
people = ['Alice', 'Bob', 'Carol', 'David']
random.shuffle(people)
people # ['Bob', 'Carol', 'David', 'Alice']
random.shuffle(people)
people # random list of people
#Augmented Assignment Operators
spam += 1 # spam = spam + 1
spam -= 1 # spam = spam - 1
spam *= 1 # spam = spam * 1
spam /= 1 #spam = spam / 1
spam %= 1 #spam = spam % 1
| en | 0.598289 | #Negative Indexes # elepant # bat # Getting a List from another List with Slices # ['cat', 'bat', 'rat', 'elephant'] # ['bat', 'rat'] # ['cat', 'bat', 'rat'] # ['cat', 'bat'] # ['bat', 'rat', 'elephant'] # ['cat', 'bat', 'rat', 'elephant'] # Getting a List's length with the len() Function # 3 # Changing Values in a List with Indexes # ['cat', 'aardvark', 'rat', 'elephant'] # ['cat', 'aardvark', 'aardvark', 'elephant'] # ['cat', 'aardvark', 'aardvark', 12345] # List Concatenation and List Replication # [1, 2, 3, 'A', 'B', 'C'] #['X', 'Y', 'Z', 'X', 'Y', 'Z', 'X', 'Y', 'Z'] # [1, 2, 3, 'A', 'B', 'C'] # Removing Values From Lists with del Statements # ['cat', 'bat', 'elephant'] # ['cat', 'bat'] # Using for Loops with Lists # The in and not in Operators # True # False # False # Type in a pet name and then check wether the name is in a list of pets # The Multiple Assignment Trick # type this line # Using the enumerate() Function with Lists # enumerate() Function is useful when you need both the item and item's index in loop's block # Using the random.choice() and random.shuffle() Function with Lists # random.choice(someList) to be a shorter form of someList[random.randint(0, len(someList)-1)] # ['Bob', 'Carol', 'David', 'Alice'] # random list of people #Augmented Assignment Operators # spam = spam + 1 # spam = spam - 1 # spam = spam * 1 #spam = spam / 1 #spam = spam % 1 | 3.747716 | 4 |
WebVisualizations/data.py | chuhaovince/Web-Design-Challenge | 0 | 9468 | import pandas as pd
path = "Resources/cities.csv"
data = pd.read_csv(path)
data_html = data.to_html("data.html", bold_rows = True) | import pandas as pd
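# Note (illustrative): when a path/buffer is passed, DataFrame.to_html writes the
# file and returns None, so data_html above ends up as None. To also keep the
# markup in memory, render it without a target:
# html_string = data.to_html(bold_rows=True)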
path = "Resources/cities.csv"
data = pd.read_csv(path)
data_html = data.to_html("data.html", bold_rows = True) | none | 1 | 2.609541 | 3 |
|
qemu/scripts/codeconverter/codeconverter/test_patching.py | hyunjoy/scripts | 44 | 9469 | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from tempfile import NamedTemporaryFile
from .patching import FileInfo, FileMatch, Patch, FileList
from .regexps import *
class BasicPattern(FileMatch):
regexp = '[abc]{3}'
@property
def name(self):
return self.group(0)
def replacement(self) -> str:
# replace match with the middle character repeated 5 times
return self.group(0)[1].upper()*5
def test_pattern_patching():
of = NamedTemporaryFile('wt')
of.writelines(['one line\n',
'this pattern will be patched: defbbahij\n',
'third line\n',
'another pattern: jihaabfed'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
matches = f.matches_of_type(BasicPattern)
assert len(matches) == 2
p2 = matches[1]
# manually add patch, to see if .append() works:
f.patches.append(p2.append('XXX'))
# apply all patches:
f.gen_patches(matches)
patched = f.get_patched_content()
assert patched == ('one line\n'+
'this pattern will be patched: defBBBBBhij\n'+
'third line\n'+
'another pattern: jihAAAAAXXXfed')
class Function(FileMatch):
regexp = S(r'BEGIN\s+', NAMED('name', RE_IDENTIFIER), r'\n',
r'(.*\n)*?END\n')
class Statement(FileMatch):
regexp = S(r'^\s*', NAMED('name', RE_IDENTIFIER), r'\(\)\n')
def test_container_match():
of = NamedTemporaryFile('wt')
of.writelines(['statement1()\n',
'statement2()\n',
'BEGIN function1\n',
' statement3()\n',
' statement4()\n',
'END\n',
'BEGIN function2\n',
' statement5()\n',
' statement6()\n',
'END\n',
'statement7()\n'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
assert len(f.matches_of_type(Function)) == 2
print(' '.join(m.name for m in f.matches_of_type(Statement)))
assert len(f.matches_of_type(Statement)) == 7
f1 = f.find_match(Function, 'function1')
f2 = f.find_match(Function, 'function2')
st1 = f.find_match(Statement, 'statement1')
st2 = f.find_match(Statement, 'statement2')
st3 = f.find_match(Statement, 'statement3')
st4 = f.find_match(Statement, 'statement4')
st5 = f.find_match(Statement, 'statement5')
st6 = f.find_match(Statement, 'statement6')
st7 = f.find_match(Statement, 'statement7')
assert not f1.contains(st1)
assert not f1.contains(st2)
assert not f1.contains(st2)
assert f1.contains(st3)
assert f1.contains(st4)
assert not f1.contains(st5)
assert not f1.contains(st6)
assert not f1.contains(st7)
assert not f2.contains(st1)
assert not f2.contains(st2)
assert not f2.contains(st2)
assert not f2.contains(st3)
assert not f2.contains(st4)
assert f2.contains(st5)
assert f2.contains(st6)
assert not f2.contains(st7)
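# Illustrative only (not part of the QEMU test suite): another toy pattern in the
# same style as BasicPattern above. A FileMatch subclass needs a `regexp`, plus a
# replacement() if it should rewrite its matches through gen_patches().
class DigitsPattern(FileMatch):
    regexp = '[0-9]+'

    def replacement(self) -> str:
        # pad every run of digits to at least three characters
        return self.group(0).zfill(3)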
| # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from tempfile import NamedTemporaryFile
from .patching import FileInfo, FileMatch, Patch, FileList
from .regexps import *
class BasicPattern(FileMatch):
regexp = '[abc]{3}'
@property
def name(self):
return self.group(0)
def replacement(self) -> str:
# replace match with the middle character repeated 5 times
return self.group(0)[1].upper()*5
def test_pattern_patching():
of = NamedTemporaryFile('wt')
of.writelines(['one line\n',
'this pattern will be patched: defbbahij\n',
'third line\n',
'another pattern: jihaabfed'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
matches = f.matches_of_type(BasicPattern)
assert len(matches) == 2
p2 = matches[1]
# manually add patch, to see if .append() works:
f.patches.append(p2.append('XXX'))
# apply all patches:
f.gen_patches(matches)
patched = f.get_patched_content()
assert patched == ('one line\n'+
'this pattern will be patched: defBBBBBhij\n'+
'third line\n'+
'another pattern: jihAAAAAXXXfed')
class Function(FileMatch):
regexp = S(r'BEGIN\s+', NAMED('name', RE_IDENTIFIER), r'\n',
r'(.*\n)*?END\n')
class Statement(FileMatch):
regexp = S(r'^\s*', NAMED('name', RE_IDENTIFIER), r'\(\)\n')
def test_container_match():
of = NamedTemporaryFile('wt')
of.writelines(['statement1()\n',
'statement2()\n',
'BEGIN function1\n',
' statement3()\n',
' statement4()\n',
'END\n',
'BEGIN function2\n',
' statement5()\n',
' statement6()\n',
'END\n',
'statement7()\n'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
assert len(f.matches_of_type(Function)) == 2
print(' '.join(m.name for m in f.matches_of_type(Statement)))
assert len(f.matches_of_type(Statement)) == 7
f1 = f.find_match(Function, 'function1')
f2 = f.find_match(Function, 'function2')
st1 = f.find_match(Statement, 'statement1')
st2 = f.find_match(Statement, 'statement2')
st3 = f.find_match(Statement, 'statement3')
st4 = f.find_match(Statement, 'statement4')
st5 = f.find_match(Statement, 'statement5')
st6 = f.find_match(Statement, 'statement6')
st7 = f.find_match(Statement, 'statement7')
assert not f1.contains(st1)
assert not f1.contains(st2)
assert not f1.contains(st2)
assert f1.contains(st3)
assert f1.contains(st4)
assert not f1.contains(st5)
assert not f1.contains(st6)
assert not f1.contains(st7)
assert not f2.contains(st1)
assert not f2.contains(st2)
assert not f2.contains(st2)
assert not f2.contains(st3)
assert not f2.contains(st4)
assert f2.contains(st5)
assert f2.contains(st6)
assert not f2.contains(st7)
| en | 0.811475 | # Copyright (C) 2020 Red Hat Inc. # # Authors: # <NAME> <<EMAIL>> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # replace match with the middle character repeated 5 times # manually add patch, to see if .append() works: # apply all patches: | 2.523169 | 3 |
Traversy Media/Python Django Dev to Deployment/Python Fundamentals/Tuples and Sets.py | Anim-101/CourseHub | 3 | 9470 | # # Simple Tuple
# fruits = ('Apple', 'Orange', 'Mango')
# # Using Constructor
# fruits = tuple(('Apple', 'Orange', 'Mango'))
# # Getting a Single Value
# print(fruits[1])
# Trying to change based on position
# fruits[1] = 'Grape'
# Tuples with one value should have trailing comma
# fruits = ('Apple')
# fruits = ('Apple',)
# # Getting length of a tuple
# print(len(fruits))
# ## Set
fruits = {'Apple', 'Orange', 'Mango', 'Apple'}
# Checking if in Set
print('Apple' in fruits)
# Add to Set
fruits.add('Grape')
# Removing from Set
fruits.remove('Grape')
# Clearing Set
fruits.clear()
# Delete set
del fruits
# print(fruits)  # would raise NameError: 'fruits' was deleted above
| # # Simple Tuple
# fruits = ('Apple', 'Orange', 'Mango')
# # Using Constructor
# fruits = tuple(('Apple', 'Orange', 'Mango'))
# # Getting a Single Value
# print(fruits[1])
# Trying to change based on position
# fruits[1] = 'Grape'
# Tuples with one value should have trailing comma
# fruits = ('Apple')
# fruits = ('Apple',)
# # Getting length of a tuple
# print(len(fruits))
# ## Set
fruits = {'Apple', 'Orange', 'Mango', 'Apple'}
# Checking if in Set
print('Apple' in fruits)
# Add to Set
fruits.add('Grape')
# Removing from Set
fruits.remove('Grape')
# Clearing Set
fruits.clear()
# Delete set
del fruits
# print(fruits)  # would raise NameError: 'fruits' was deleted above
| en | 0.695713 | # # Simple Tuple # fruits = ('Apple', 'Orange', 'Mango') # # Using Constructor # fruits = tuple(('Apple', 'Orange', 'Mango')) # # Getting a Single Value # print(fruits[1]) # Trying to change based on position # fruits[1] = 'Grape' # Tuples with one value should have trailing comma # fruits = ('Apple') # fruits = ('Apple',) # # Getting length of a tupel # print(len(fruits)) # ## Set # Checking if in Set # Add to Set # Removing from Set # Clearing Set # Delete set | 4.086753 | 4 |
nerblackbox/modules/ner_training/metrics/ner_metrics.py | flxst/nerblackbox | 0 | 9471 | <filename>nerblackbox/modules/ner_training/metrics/ner_metrics.py
from dataclasses import dataclass
from dataclasses import asdict
from typing import List, Tuple, Callable
import numpy as np
from sklearn.metrics import accuracy_score as accuracy_sklearn
from sklearn.metrics import precision_score as precision_sklearn
from sklearn.metrics import recall_score as recall_sklearn
from sklearn.metrics import precision_recall_fscore_support as prf_sklearn
from sklearn.exceptions import UndefinedMetricWarning
import warnings
from seqeval.metrics import precision_score as precision_seqeval
from seqeval.metrics import recall_score as recall_seqeval
from seqeval.metrics import f1_score as f1_seqeval
from seqeval.scheme import IOB2, BILOU
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
class NerMetrics:
"""
On the token level, the tags are evaluated in the given annotation scheme (e.g. plain, BIO)
On the entity level, the tags are evaluated in the BIO scheme (after converting if needed)
"""
def __init__(
self,
true_flat,
pred_flat,
level,
scheme,
classes=None,
class_index=None,
verbose=False,
):
"""
:param true_flat: [np array] of shape [batch_size * seq_length]
:param pred_flat: [np array] of shape [batch_size * seq_length]
:param level: [str] 'token' or 'entity'
:param scheme: [str] e.g. 'plain', 'bio'
:param classes: [optional, list] of [str] labels to take into account for metrics -> if level = 'token'
:param class_index: [optional, int] index to take into account for metrics -> if level = 'entity'
:param verbose: [optional, bool] if True, show verbose output
"""
self.true_flat = true_flat # token -> plain. entity -> plain, bio, bilou
self.pred_flat = pred_flat # token -> plain. entity -> plain, bio, bilou
self.scheme = scheme # token -> plain. entity -> plain, bio, bilou
self.classes = classes
self.class_index = class_index
self.level = level
self.verbose = verbose
if self.scheme == "bilou":
self.scheme_entity = "bilou"
self.scheme_entity_seqeval = BILOU
else: # plain, bio
self.scheme_entity = "bio"
self.scheme_entity_seqeval = IOB2
self.results = Results()
self.failure_value = -1
assert self.level in [
"token",
"entity",
], f"ERROR! level = {self.level} unknown."
if self.level == "entity":
self.true_flat_bio: List[str] = Tags(self.true_flat,).convert_scheme(
source_scheme=self.scheme, target_scheme=self.scheme_entity
) # entity -> bio, bilou
self.pred_flat_bio: List[str] = Tags(self.pred_flat).convert_scheme(
source_scheme=self.scheme, target_scheme=self.scheme_entity
) # entity -> bio, bilou
# ASR
self.pred_flat_bio_corrected: List[str]
self.pred_flat_bio_corrected, self.results.asr_abidance = Tags(
self.pred_flat_bio
).restore_annotation_scheme_consistency(
scheme=self.scheme_entity
) # entity -> bio, bilou
def results_as_dict(self):
return asdict(self.results)
def compute(self, _metrics):
"""
computes selected metrics
----------------------------------------------------------
:param _metrics: [list] of [str], e.g. ['acc, 'precision']
:return: -
"""
warnings.filterwarnings("error")
if "acc" in _metrics:
self.accuracy()
if "precision" in _metrics or "recall" in _metrics or "f1" in _metrics:
self._compute_well_defined_classes()
if "precision" in _metrics or "f1" in _metrics:
self.precision()
if "recall" in _metrics or "f1" in _metrics:
self.recall()
if "f1" in _metrics:
self.f1_score()
if (
"asr_abidance" in _metrics
or "asr_precision" in _metrics
or "asr_recall" in _metrics
or "asr_f1" in _metrics
):
self.compute_asr_results()
warnings.resetwarnings()
def accuracy(self):
"""
computes accuracy of predictions (_np_logits) w.r.t. ground truth (_np_label_ids)
---------------------------------------------------------------------------------
:return: acc [np float]
"""
self.results.acc = accuracy_sklearn(
self.true_flat, self.pred_flat, normalize=True
)
def precision(self):
"""
computes precision (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
precision_micro [np array] for all examples
precision_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.precision_micro = self._token_evaluation(
evaluation_function=precision_sklearn, average="micro"
)
self.results.precision_macro = self._token_evaluation(
evaluation_function=precision_sklearn, average="macro"
)
elif self.level == "entity":
self.results.precision_micro = self._entity_evaluation_micro(
evaluation_function=precision_seqeval
)
self.results.precision_macro = self._entity_evaluation_macro(
evaluation_function=precision_seqeval,
)
def recall(self):
"""
computes recall (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
recall_micro [np array] for all examples
recall_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.recall_micro = self._token_evaluation(
evaluation_function=recall_sklearn, average="micro"
)
self.results.recall_macro = self._token_evaluation(
evaluation_function=recall_sklearn, average="macro"
)
elif self.level == "entity":
self.results.recall_micro = self._entity_evaluation_micro(
evaluation_function=recall_seqeval
)
self.results.recall_macro = self._entity_evaluation_macro(
evaluation_function=recall_seqeval
)
def f1_score(self):
"""
computes f1 score (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
f1_score_micro [np array] for all examples
f1_score_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.f1_micro = self._token_evaluation(
evaluation_function=prf_sklearn, average="micro"
)
self.results.f1_macro = self._token_evaluation(
evaluation_function=prf_sklearn, average="macro"
)
elif self.level == "entity":
self.results.f1_micro, self.results.f1_macro = self._entity_evaluation_f1(
evaluation_function=f1_seqeval,
)
def compute_asr_results(self):
"""
computes
- self.results.asr_precision_micro
- self.results.asr_recall_micro
- self.results.asr_f1_micro
"""
def _entity_evaluation_micro_asr(evaluation_function: Callable) -> float:
"""helper function"""
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio_corrected], # corrected !!!
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
return metric
self.results.asr_precision_micro = _entity_evaluation_micro_asr(
evaluation_function=precision_seqeval
)
self.results.asr_recall_micro = _entity_evaluation_micro_asr(
evaluation_function=recall_seqeval
)
self.results.asr_f1_micro = _entity_evaluation_micro_asr(
evaluation_function=f1_seqeval
)
def _token_evaluation(self, evaluation_function: Callable, average: str) -> float:
"""
compute precision/recall/f1 on token level
Args:
evaluation_function: precision_sklearn, recall_sklearn, prf_sklearn
average: 'micro' or 'macro'
Returns:
metric: precision/recall on token level, 'micro' or 'macro' averaged
"""
assert evaluation_function in [
precision_sklearn,
recall_sklearn,
prf_sklearn,
], f"evaluation function = {evaluation_function} unknown / not allowed."
assert average in ["micro", "macro"], f"average = {average} unknown."
if self.classes is None or len(self.classes) > 1: # "all" / "fil"
if evaluation_function != prf_sklearn:
metric = evaluation_function(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division=0,
)
else:
_, _, metric, _ = prf_sklearn(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division=0,
)
else:
try:
if evaluation_function != prf_sklearn:
metric = evaluation_function(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division="warn",
)
else:
_, _, metric, _ = prf_sklearn(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
warn_for=("precision", "recall", "f-score"),
zero_division="warn",
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
return metric
def _entity_evaluation_micro(self, evaluation_function: Callable) -> float:
"""
compute precision/recall micro average on entity level
Args:
evaluation_function: precision_seqeval, recall_seqeval
Returns:
metric: precision/recall on entity level, 'macro' averaged
"""
assert evaluation_function in [
precision_seqeval,
recall_seqeval,
], f"evaluation function = {evaluation_function} unknown / not allowed."
if self.class_index is None: # "fil"
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
else: # "ind"
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division="warn",
)[self.class_index]
except UndefinedMetricWarning:
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division=0,
)[self.class_index]
except IndexError:
metric = self.failure_value
if metric == 0:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division=1,
)[self.class_index]
if metric == 1:
metric = self.failure_value
except IndexError:
metric = self.failure_value
return metric
def _compute_well_defined_classes(self) -> None:
"""
Created Attributes:
results.classindices_macro: list of indices of well-defined classes in terms of precision, recall, f1
results.numberofclasses_macro: number of well-defined classes in terms of precision, recall, f1
"""
def _get_index_list(
evaluation_function: Callable, true_array, pred_array, scheme_seqeval=None
):
kwargs = (
{"mode": "strict", "scheme": scheme_seqeval}
if scheme_seqeval is not None
else {}
)
try:
metric_list = evaluation_function(
true_array,
pred_array,
average=None,
zero_division="warn",
**kwargs,
)
index_list = [i for i in range(len(metric_list))]
except UndefinedMetricWarning:
metric_list_all = evaluation_function(
true_array,
pred_array,
average=None,
zero_division=0,
**kwargs,
)
index_list = list()
for index, metric_elem in enumerate(metric_list_all):
if metric_elem != 0:
index_list.append(index)
else:
metric_elem_alt = evaluation_function(
true_array,
pred_array,
average=None,
zero_division=1,
**kwargs,
)[index]
if metric_elem_alt != 1:
index_list.append(index)
return index_list
if self.level == "token":
index_list_precision = _get_index_list(
evaluation_function=precision_sklearn,
true_array=self.true_flat,
pred_array=self.pred_flat,
)
index_list_recall = _get_index_list(
evaluation_function=recall_sklearn,
true_array=self.true_flat,
pred_array=self.pred_flat,
)
else:
index_list_precision = _get_index_list(
evaluation_function=precision_seqeval,
true_array=[self.true_flat_bio],
pred_array=[self.pred_flat_bio],
scheme_seqeval=self.scheme_entity_seqeval,
)
index_list_recall = _get_index_list(
evaluation_function=recall_seqeval,
true_array=[self.true_flat_bio],
pred_array=[self.pred_flat_bio],
scheme_seqeval=self.scheme_entity_seqeval,
)
self.results.classindices_macro = tuple(
[index for index in index_list_precision if index in index_list_recall]
)
if self.level == "token":
self.results.numberofclasses_macro = (
len(self.results.classindices_macro) - 1
) # disregard "O" label
else:
self.results.numberofclasses_macro = len(self.results.classindices_macro)
def _entity_evaluation_macro(
self,
evaluation_function: Callable,
) -> float:
"""
compute precision/recall macro average on entity level
Args:
evaluation_function: precision_seqeval, recall_seqeval
Returns:
metric: precision/recall on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
precision_seqeval,
recall_seqeval,
], f"evaluation function = {evaluation_function} unknown / not allowed."
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average="macro",
zero_division=0,
)
return metric
def _entity_evaluation_f1(
self, evaluation_function: Callable
) -> Tuple[float, float]:
"""
compute f1 micro or macro average on entity level
Args:
evaluation_function: f1_seqeval
Returns:
f1_micro: f1 on entity level, 'micro' averaged
f1_macro: f1 on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
f1_seqeval
], f"evaluation function = {evaluation_function} unknown / not allowed."
# ensure that precision and recall have been called:
# self.precision()
# self.recall()
# f1_micro
if (
self.results.precision_micro == self.failure_value
or self.results.recall_micro == self.failure_value
):
f1_micro = self.failure_value
else:
if self.class_index is None: # "fil"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
else: # "ind"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division="warn",
)[self.class_index]
# f1_macro
if (
self.results.precision_macro == self.failure_value
or self.results.recall_macro == self.failure_value
):
f1_macro = self.failure_value
else:
if self.class_index is None: # "fil"
metric_list = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
)
f1_macro = np.average(metric_list)
else: # "ind"
f1_macro = self.failure_value
return f1_micro, f1_macro
@dataclass
class Results:
acc: float = -1
precision_micro: float = -1
precision_macro: float = -1
recall_micro: float = -1
recall_macro: float = -1
f1_micro: float = -1
f1_macro: float = -1
classindices_macro: Tuple[float, ...] = ()
numberofclasses_macro: float = -1
asr_abidance: float = -1
asr_precision_micro: float = -1
asr_recall_micro: float = -1
asr_f1_micro: float = -1
| <filename>nerblackbox/modules/ner_training/metrics/ner_metrics.py
from dataclasses import dataclass
from dataclasses import asdict
from typing import List, Tuple, Callable
import numpy as np
from sklearn.metrics import accuracy_score as accuracy_sklearn
from sklearn.metrics import precision_score as precision_sklearn
from sklearn.metrics import recall_score as recall_sklearn
from sklearn.metrics import precision_recall_fscore_support as prf_sklearn
from sklearn.exceptions import UndefinedMetricWarning
import warnings
from seqeval.metrics import precision_score as precision_seqeval
from seqeval.metrics import recall_score as recall_seqeval
from seqeval.metrics import f1_score as f1_seqeval
from seqeval.scheme import IOB2, BILOU
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
class NerMetrics:
"""
On the token level, the tags are evaluated in the given annotation scheme (e.g. plain, BIO)
On the entity level, the tags are evaluated in the BIO scheme (after converting if needed)
"""
def __init__(
self,
true_flat,
pred_flat,
level,
scheme,
classes=None,
class_index=None,
verbose=False,
):
"""
:param true_flat: [np array] of shape [batch_size * seq_length]
:param pred_flat: [np array] of shape [batch_size * seq_length]
:param level: [str] 'token' or 'entity'
:param scheme: [str] e.g. 'plain', 'bio'
:param classes: [optional, list] of [str] labels to take into account for metrics -> if level = 'token'
:param class_index: [optional, int] index to take into account for metrics -> if level = 'entity'
:param verbose: [optional, bool] if True, show verbose output
"""
self.true_flat = true_flat # token -> plain. entity -> plain, bio, bilou
self.pred_flat = pred_flat # token -> plain. entity -> plain, bio, bilou
self.scheme = scheme # token -> plain. entity -> plain, bio, bilou
self.classes = classes
self.class_index = class_index
self.level = level
self.verbose = verbose
if self.scheme == "bilou":
self.scheme_entity = "bilou"
self.scheme_entity_seqeval = BILOU
else: # plain, bio
self.scheme_entity = "bio"
self.scheme_entity_seqeval = IOB2
self.results = Results()
self.failure_value = -1
assert self.level in [
"token",
"entity",
], f"ERROR! level = {self.level} unknown."
if self.level == "entity":
self.true_flat_bio: List[str] = Tags(self.true_flat,).convert_scheme(
source_scheme=self.scheme, target_scheme=self.scheme_entity
) # entity -> bio, bilou
self.pred_flat_bio: List[str] = Tags(self.pred_flat).convert_scheme(
source_scheme=self.scheme, target_scheme=self.scheme_entity
) # entity -> bio, bilou
# ASR
self.pred_flat_bio_corrected: List[str]
self.pred_flat_bio_corrected, self.results.asr_abidance = Tags(
self.pred_flat_bio
).restore_annotation_scheme_consistency(
scheme=self.scheme_entity
) # entity -> bio, bilou
def results_as_dict(self):
return asdict(self.results)
def compute(self, _metrics):
"""
computes selected metrics
----------------------------------------------------------
        :param _metrics: [list] of [str], e.g. ['acc', 'precision']
:return: -
"""
warnings.filterwarnings("error")
if "acc" in _metrics:
self.accuracy()
if "precision" in _metrics or "recall" in _metrics or "f1" in _metrics:
self._compute_well_defined_classes()
if "precision" in _metrics or "f1" in _metrics:
self.precision()
if "recall" in _metrics or "f1" in _metrics:
self.recall()
if "f1" in _metrics:
self.f1_score()
if (
"asr_abidance" in _metrics
or "asr_precision" in _metrics
or "asr_recall" in _metrics
or "asr_f1" in _metrics
):
self.compute_asr_results()
warnings.resetwarnings()
def accuracy(self):
"""
computes accuracy of predictions (_np_logits) w.r.t. ground truth (_np_label_ids)
---------------------------------------------------------------------------------
:return: acc [np float]
"""
self.results.acc = accuracy_sklearn(
self.true_flat, self.pred_flat, normalize=True
)
def precision(self):
"""
computes precision (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
precision_micro [np array] for all examples
precision_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.precision_micro = self._token_evaluation(
evaluation_function=precision_sklearn, average="micro"
)
self.results.precision_macro = self._token_evaluation(
evaluation_function=precision_sklearn, average="macro"
)
elif self.level == "entity":
self.results.precision_micro = self._entity_evaluation_micro(
evaluation_function=precision_seqeval
)
self.results.precision_macro = self._entity_evaluation_macro(
evaluation_function=precision_seqeval,
)
def recall(self):
"""
computes recall (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
recall_micro [np array] for all examples
recall_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.recall_micro = self._token_evaluation(
evaluation_function=recall_sklearn, average="micro"
)
self.results.recall_macro = self._token_evaluation(
evaluation_function=recall_sklearn, average="macro"
)
elif self.level == "entity":
self.results.recall_micro = self._entity_evaluation_micro(
evaluation_function=recall_seqeval
)
self.results.recall_macro = self._entity_evaluation_macro(
evaluation_function=recall_seqeval
)
def f1_score(self):
"""
computes f1 score (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
f1_score_micro [np array] for all examples
f1_score_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.f1_micro = self._token_evaluation(
evaluation_function=prf_sklearn, average="micro"
)
self.results.f1_macro = self._token_evaluation(
evaluation_function=prf_sklearn, average="macro"
)
elif self.level == "entity":
self.results.f1_micro, self.results.f1_macro = self._entity_evaluation_f1(
evaluation_function=f1_seqeval,
)
def compute_asr_results(self):
"""
computes
- self.results.asr_precision_micro
- self.results.asr_recall_micro
- self.results.asr_f1_micro
"""
def _entity_evaluation_micro_asr(evaluation_function: Callable) -> float:
"""helper function"""
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio_corrected], # corrected !!!
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
return metric
self.results.asr_precision_micro = _entity_evaluation_micro_asr(
evaluation_function=precision_seqeval
)
self.results.asr_recall_micro = _entity_evaluation_micro_asr(
evaluation_function=recall_seqeval
)
self.results.asr_f1_micro = _entity_evaluation_micro_asr(
evaluation_function=f1_seqeval
)
def _token_evaluation(self, evaluation_function: Callable, average: str) -> float:
"""
compute precision/recall/f1 on token level
Args:
evaluation_function: precision_sklearn, recall_sklearn, prf_sklearn
average: 'micro' or 'macro'
Returns:
metric: precision/recall on token level, 'micro' or 'macro' averaged
"""
assert evaluation_function in [
precision_sklearn,
recall_sklearn,
prf_sklearn,
], f"evaluation function = {evaluation_function} unknown / not allowed."
assert average in ["micro", "macro"], f"average = {average} unknown."
if self.classes is None or len(self.classes) > 1: # "all" / "fil"
if evaluation_function != prf_sklearn:
metric = evaluation_function(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division=0,
)
else:
_, _, metric, _ = prf_sklearn(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division=0,
)
else:
try:
if evaluation_function != prf_sklearn:
metric = evaluation_function(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
zero_division="warn",
)
else:
_, _, metric, _ = prf_sklearn(
self.true_flat,
self.pred_flat,
labels=self.classes,
average=average,
warn_for=("precision", "recall", "f-score"),
zero_division="warn",
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
return metric
def _entity_evaluation_micro(self, evaluation_function: Callable) -> float:
"""
compute precision/recall micro average on entity level
Args:
evaluation_function: precision_seqeval, recall_seqeval
Returns:
metric: precision/recall on entity level, 'macro' averaged
"""
assert evaluation_function in [
precision_seqeval,
recall_seqeval,
], f"evaluation function = {evaluation_function} unknown / not allowed."
if self.class_index is None: # "fil"
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
else: # "ind"
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division="warn",
)[self.class_index]
except UndefinedMetricWarning:
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division=0,
)[self.class_index]
except IndexError:
metric = self.failure_value
if metric == 0:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division=1,
)[self.class_index]
if metric == 1:
metric = self.failure_value
except IndexError:
metric = self.failure_value
return metric
def _compute_well_defined_classes(self) -> None:
"""
Created Attributes:
results.classindices_macro: list of indices of well-defined classes in terms of precision, recall, f1
results.numberofclasses_macro: number of well-defined classes in terms of precision, recall, f1
"""
def _get_index_list(
evaluation_function: Callable, true_array, pred_array, scheme_seqeval=None
):
kwargs = (
{"mode": "strict", "scheme": scheme_seqeval}
if scheme_seqeval is not None
else {}
)
try:
metric_list = evaluation_function(
true_array,
pred_array,
average=None,
zero_division="warn",
**kwargs,
)
index_list = [i for i in range(len(metric_list))]
except UndefinedMetricWarning:
metric_list_all = evaluation_function(
true_array,
pred_array,
average=None,
zero_division=0,
**kwargs,
)
index_list = list()
for index, metric_elem in enumerate(metric_list_all):
if metric_elem != 0:
index_list.append(index)
else:
metric_elem_alt = evaluation_function(
true_array,
pred_array,
average=None,
zero_division=1,
**kwargs,
)[index]
if metric_elem_alt != 1:
index_list.append(index)
return index_list
if self.level == "token":
index_list_precision = _get_index_list(
evaluation_function=precision_sklearn,
true_array=self.true_flat,
pred_array=self.pred_flat,
)
index_list_recall = _get_index_list(
evaluation_function=recall_sklearn,
true_array=self.true_flat,
pred_array=self.pred_flat,
)
else:
index_list_precision = _get_index_list(
evaluation_function=precision_seqeval,
true_array=[self.true_flat_bio],
pred_array=[self.pred_flat_bio],
scheme_seqeval=self.scheme_entity_seqeval,
)
index_list_recall = _get_index_list(
evaluation_function=recall_seqeval,
true_array=[self.true_flat_bio],
pred_array=[self.pred_flat_bio],
scheme_seqeval=self.scheme_entity_seqeval,
)
self.results.classindices_macro = tuple(
[index for index in index_list_precision if index in index_list_recall]
)
if self.level == "token":
self.results.numberofclasses_macro = (
len(self.results.classindices_macro) - 1
) # disregard "O" label
else:
self.results.numberofclasses_macro = len(self.results.classindices_macro)
def _entity_evaluation_macro(
self,
evaluation_function: Callable,
) -> float:
"""
compute precision/recall macro average on entity level
Args:
evaluation_function: precision_seqeval, recall_seqeval
Returns:
metric: precision/recall on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
precision_seqeval,
recall_seqeval,
], f"evaluation function = {evaluation_function} unknown / not allowed."
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average="macro",
zero_division=0,
)
return metric
def _entity_evaluation_f1(
self, evaluation_function: Callable
) -> Tuple[float, float]:
"""
compute f1 micro or macro average on entity level
Args:
evaluation_function: f1_seqeval
Returns:
f1_micro: f1 on entity level, 'micro' averaged
f1_macro: f1 on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
f1_seqeval
], f"evaluation function = {evaluation_function} unknown / not allowed."
# ensure that precision and recall have been called:
# self.precision()
# self.recall()
# f1_micro
if (
self.results.precision_micro == self.failure_value
or self.results.recall_micro == self.failure_value
):
f1_micro = self.failure_value
else:
if self.class_index is None: # "fil"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
else: # "ind"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division="warn",
)[self.class_index]
# f1_macro
if (
self.results.precision_macro == self.failure_value
or self.results.recall_macro == self.failure_value
):
f1_macro = self.failure_value
else:
if self.class_index is None: # "fil"
metric_list = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
)
f1_macro = np.average(metric_list)
else: # "ind"
f1_macro = self.failure_value
return f1_micro, f1_macro
@dataclass
class Results:
acc: float = -1
precision_micro: float = -1
precision_macro: float = -1
recall_micro: float = -1
recall_macro: float = -1
f1_micro: float = -1
f1_macro: float = -1
classindices_macro: Tuple[float, ...] = ()
numberofclasses_macro: float = -1
asr_abidance: float = -1
asr_precision_micro: float = -1
asr_recall_micro: float = -1
asr_f1_micro: float = -1
| en | 0.530789 | On the token level, the tags are evaluated in the given annotation scheme (e.g. plain, BIO) On the entity level, the tags are evaluated in the BIO scheme (after converting if needed) :param true_flat: [np array] of shape [batch_size * seq_length] :param pred_flat: [np array] of shape [batch_size * seq_length] :param level: [str] 'token' or 'entity' :param scheme: [str] e.g. 'plain', 'bio' :param classes: [optional, list] of [str] labels to take into account for metrics -> if level = 'token' :param class_index: [optional, int] index to take into account for metrics -> if level = 'entity' :param verbose: [optional, bool] if True, show verbose output # token -> plain. entity -> plain, bio, bilou # token -> plain. entity -> plain, bio, bilou # token -> plain. entity -> plain, bio, bilou # plain, bio # entity -> bio, bilou # entity -> bio, bilou # ASR # entity -> bio, bilou computes selected metrics ---------------------------------------------------------- :param _metrics: [list] of [str], e.g. ['acc, 'precision'] :return: - computes accuracy of predictions (_np_logits) w.r.t. ground truth (_np_label_ids) --------------------------------------------------------------------------------- :return: acc [np float] computes precision (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat) Returns: precision_micro [np array] for all examples precision_macro [np array] for each class, then averaged computes recall (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat) Returns: recall_micro [np array] for all examples recall_macro [np array] for each class, then averaged computes f1 score (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat) Returns: f1_score_micro [np array] for all examples f1_score_macro [np array] for each class, then averaged computes - self.results.asr_precision_micro - self.results.asr_recall_micro - self.results.asr_f1_micro helper function # corrected !!! compute precision/recall/f1 on token level Args: evaluation_function: precision_sklearn, recall_sklearn, prf_sklearn average: 'micro' or 'macro' Returns: metric: precision/recall on token level, 'micro' or 'macro' averaged # "all" / "fil" compute precision/recall micro average on entity level Args: evaluation_function: precision_seqeval, recall_seqeval Returns: metric: precision/recall on entity level, 'macro' averaged # "fil" # "ind" Created Attributes: results.classindices_macro: list of indices of well-defined classes in terms of precision, recall, f1 results.numberofclasses_macro: number of well-defined classes in terms of precision, recall, f1 # disregard "O" label compute precision/recall macro average on entity level Args: evaluation_function: precision_seqeval, recall_seqeval Returns: metric: precision/recall on entity level, 'macro' averaged on well-defined classes compute f1 micro or macro average on entity level Args: evaluation_function: f1_seqeval Returns: f1_micro: f1 on entity level, 'micro' averaged f1_macro: f1 on entity level, 'macro' averaged on well-defined classes # ensure that precision and recall have been called: # self.precision() # self.recall() # f1_micro # "fil" # "ind" # f1_macro # "fil" # "ind" | 2.54057 | 3 |
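# --- usage sketch (illustrative only) -----------------------------------------
# A minimal way of driving the NerMetrics class from the row above on token
# level with plain tags; the import path follows the row's file name, and the
# toy tag lists ("O", "PER", "LOC") are invented for this sketch.
from nerblackbox.modules.ner_training.metrics.ner_metrics import NerMetrics

true_flat = ["O", "PER", "PER", "O", "LOC"]   # ground-truth tag per token
pred_flat = ["O", "PER", "O", "O", "LOC"]     # predicted tag per token
ner_metrics = NerMetrics(true_flat, pred_flat, level="token", scheme="plain")
ner_metrics.compute(["acc", "precision", "recall", "f1"])
print(ner_metrics.results_as_dict())          # acc plus precision/recall/f1, micro & macro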
Assignments/hw4/rank_feat_by_chi_square.py | spacemanidol/CLMS572 | 0 | 9472 | import sys
def readInput():
labels, features, all_features, labelCount = [], [], [], {}
l = sys.stdin.readline().strip().split(' ')
while len(l)> 1:
label = l[0]
if label not in labelCount:
labelCount[label] = 0
labelCount[label] += 1
labels.append(label)
currFeat = set()
for key in l[1:]:
feature, _ = key.split(':')
all_features.append(feature)
currFeat.add(feature)
features.append(currFeat)
l = sys.stdin.readline().strip().split(' ')
return [labels, features] , set(all_features), labelCount
def rankByChiSquared(data, features, labelCount):
labels = labelCount.keys()
dataLength = len(data[0])
n = sum(labelCount.values())
results, featureOccourences, featureNonOccourences = [], {}, {}
for feature in features:
for label in labels:
featureOccourences[label] = 0 #Initialize
for i in range(dataLength):
if feature in data[1][i]:
                featureOccourences[data[0][i]] += 1 # count how many times the feature occurs in the data for each label
for label in labels:
            featureNonOccourences[label] = labelCount[label] - featureOccourences[label] #count of the times it doesn't appear for each label
totalFeatureOccourences = sum(featureOccourences.values())
totalFeatureNonOccourences = sum(featureNonOccourences.values())
chi = sum([((featureOccourences[label]-(labelCount[label]*totalFeatureOccourences/n))**2/(labelCount[label]*totalFeatureOccourences/n) +(featureNonOccourences[label] - (labelCount[label] * totalFeatureNonOccourences/n))**2/(labelCount[label] * totalFeatureNonOccourences/n)) for label in labels]) #Chi squared calc
        results.append([feature, chi, totalFeatureOccourences]) #save the result for this feature
[print('{} {:.5f} {}'.format(*score)) for score in sorted(results, key = lambda x:(-x[1], -x[2], x[0]), reverse=False)] #print features sorted by chi^2 value, count in text, alphabetically
if __name__ == "__main__":
data, all_features, labelCount= readInput()
results = rankByChiSquared(data, all_features, labelCount) | import sys
def readInput():
labels, features, all_features, labelCount = [], [], [], {}
l = sys.stdin.readline().strip().split(' ')
while len(l)> 1:
label = l[0]
if label not in labelCount:
labelCount[label] = 0
labelCount[label] += 1
labels.append(label)
currFeat = set()
for key in l[1:]:
feature, _ = key.split(':')
all_features.append(feature)
currFeat.add(feature)
features.append(currFeat)
l = sys.stdin.readline().strip().split(' ')
return [labels, features] , set(all_features), labelCount
def rankByChiSquared(data, features, labelCount):
labels = labelCount.keys()
dataLength = len(data[0])
n = sum(labelCount.values())
results, featureOccourences, featureNonOccourences = [], {}, {}
for feature in features:
for label in labels:
featureOccourences[label] = 0 #Initialize
for i in range(dataLength):
if feature in data[1][i]:
                featureOccourences[data[0][i]] += 1 # count how many times the feature occurs in the data for each label
for label in labels:
            featureNonOccourences[label] = labelCount[label] - featureOccourences[label] #count of the times it doesn't appear for each label
totalFeatureOccourences = sum(featureOccourences.values())
totalFeatureNonOccourences = sum(featureNonOccourences.values())
chi = sum([((featureOccourences[label]-(labelCount[label]*totalFeatureOccourences/n))**2/(labelCount[label]*totalFeatureOccourences/n) +(featureNonOccourences[label] - (labelCount[label] * totalFeatureNonOccourences/n))**2/(labelCount[label] * totalFeatureNonOccourences/n)) for label in labels]) #Chi squared calc
        results.append([feature, chi, totalFeatureOccourences]) #save the result for this feature
[print('{} {:.5f} {}'.format(*score)) for score in sorted(results, key = lambda x:(-x[1], -x[2], x[0]), reverse=False)] #print features sorted by chi^2 value, count in text, alphabetically
if __name__ == "__main__":
data, all_features, labelCount= readInput()
results = rankByChiSquared(data, all_features, labelCount) | en | 0.851235 | #Initialize # We could how many times the feature occours in the data for each label #count of the times it doesnt appear for each label #Chi squared calc #save the re #print features sorted by chi^2 value, count in text, alphabetically | 3.063529 | 3 |
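# --- worked example (illustrative only) ---------------------------------------
# The script above reads one document per stdin line, formatted as
# "label feat:value feat:value ...", and ranks features by a chi-square statistic
# over the label / feature-presence contingency table. The tiny counts below are
# invented to show the arithmetic for a single feature.
label_count = {"pos": 2, "neg": 2}                     # documents per label
feature_occ = {"pos": 2, "neg": 0}                     # docs per label containing the feature
feature_non = {l: label_count[l] - feature_occ[l] for l in label_count}
n = sum(label_count.values())
total_occ, total_non = sum(feature_occ.values()), sum(feature_non.values())
chi = sum(
    (feature_occ[l] - label_count[l] * total_occ / n) ** 2 / (label_count[l] * total_occ / n)
    + (feature_non[l] - label_count[l] * total_non / n) ** 2 / (label_count[l] * total_non / n)
    for l in label_count
)
print(round(chi, 5))                                   # 4.0 for this 2x2 table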
Files/joinfiles.py | LeoCruzG/4chan-thread-downloader | 0 | 9473 | # Import the json library to read JSON files
import json
# Open the master file in read mode ('r'); it holds the ids of all downloaded files
with open('master.json', 'r') as f:
    # Store the contents of master in the variable lista
lista = json.load(f)
# This example shows how specific files would be assigned to the list
#lista = ['2095303', '2169202']
# Open the file tryall.json in write mode ('w'); if it does not exist yet
# it is created at this point, and its name can be changed
with open('tryall.json', 'w') as outfile:
    # Start a counter to keep track of how many files have been merged so far
contador = 0
    # This variable keeps the name (thread id) of the previous file so we can
    # check whether the current one repeats the previous one
helper = 0
    # This variable tells us whether the contents of the current file
    # should be written into the output document
update = True
    # Iterate over the whole list of downloaded files
for names in lista:
        # Open each file
with open(f'{names}.json') as infile:
            # Read the first 3 lines
infile.readline()
infile.readline()
infile.readline()
            # Store the contents of the 4th line, which holds the thread number,
            # in a temporary variable
temp = infile.readline()
            # Check whether helper has the same contents as temp
if helper != temp:
                # If it is different the update can go ahead, since no
                # repeated threads will be written
update = True
                # assign the new contents to the persistent variable
helper = temp
            # If they have the same contents, the update is not performed
else:
update = False
        # Open the file again
with open(f'{names}.json') as infile:
            # If the post is not repeated, enter this branch
if update == True:
                # Write the full contents of the thread into the output file
outfile.write(infile.read())
                # Increase the counter, since a new document was written
contador+=1
                # Print the counter together with the name of the file that was read
print(contador, names)
            # Add a line break before writing the contents of the next file
            outfile.write("\n") | # Import the json library to read JSON files
import json
# Open the master file in read mode ('r'); it holds the ids of all downloaded files
with open('master.json', 'r') as f:
    # Store the contents of master in the variable lista
lista = json.load(f)
# This example shows how specific files would be assigned to the list
#lista = ['2095303', '2169202']
# Open the file tryall.json in write mode ('w'); if it does not exist yet
# it is created at this point, and its name can be changed
with open('tryall.json', 'w') as outfile:
    # Start a counter to keep track of how many files have been merged so far
contador = 0
    # This variable keeps the name (thread id) of the previous file so we can
    # check whether the current one repeats the previous one
helper = 0
    # This variable tells us whether the contents of the current file
    # should be written into the output document
update = True
    # Iterate over the whole list of downloaded files
for names in lista:
        # Open each file
with open(f'{names}.json') as infile:
            # Read the first 3 lines
infile.readline()
infile.readline()
infile.readline()
            # Store the contents of the 4th line, which holds the thread number,
            # in a temporary variable
temp = infile.readline()
            # Check whether helper has the same contents as temp
if helper != temp:
                # If it is different the update can go ahead, since no
                # repeated threads will be written
update = True
                # assign the new contents to the persistent variable
helper = temp
            # If they have the same contents, the update is not performed
else:
update = False
        # Open the file again
with open(f'{names}.json') as infile:
            # If the post is not repeated, enter this branch
if update == True:
                # Write the full contents of the thread into the output file
outfile.write(infile.read())
                # Increase the counter, since a new document was written
contador+=1
                # Print the counter together with the name of the file that was read
print(contador, names)
            # Add a line break before writing the contents of the next file
            outfile.write("\n") | es | 0.991897 | # Import the json library to read JSON files # Open the master file in read mode ('r'); it holds the ids of all downloaded files # Store the contents of master in the variable lista # This example shows how specific files would be assigned to the list #lista = ['2095303', '2169202'] # Open the file tryall.json in write mode ('w'); if it does not exist yet # it is created at this point, and its name can be changed # Start a counter to keep track of how many files have been merged so far # This variable keeps the name (thread id) of the previous file so we can # check whether the current one repeats the previous one # This variable tells us whether the contents of the current file # should be written into the output document # Iterate over the whole list of downloaded files # Open each file # Read the first 3 lines # Store the contents of the 4th line, which holds the thread number, # in a temporary variable # Check whether helper has the same contents as temp # If it is different the update can go ahead, since no # repeated threads will be written # assign the new contents to the persistent variable # If they have the same contents, the update is not performed # Open the file again # If the post is not repeated, enter this branch # Write the full contents of the thread into the output file # Increase the counter, since a new document was written # Print the counter together with the name of the file that was read # Add a line break before writing the contents of the next file | 2.906473 | 3 |
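# --- input layout sketch (illustrative only) -----------------------------------
# The merge script above expects a master.json in its working directory holding a
# JSON list of thread ids, plus one <id>.json dump per downloaded thread whose
# fourth line carries the thread number. The ids below are placeholder values
# taken from the commented-out example in the script.
import json

with open("master.json", "w") as f:
    json.dump(["2095303", "2169202"], f)   # ids of the per-thread JSON files to merge
# running joinfiles.py afterwards concatenates 2095303.json and 2169202.json into
# tryall.json, skipping a file when its thread number repeats the previous one.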
pycopula/archimedean_generators.py | SvenSerneels/pycopula | 2 | 9474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file contains the generators and their inverses for common archimedean copulas.
"""
import numpy as np
def boundsConditions(x):
if x < 0 or x > 1:
raise ValueError("Unable to compute generator for x equals to {}".format(x))
def claytonGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. / theta) * (x**(-theta) - 1.)
def claytonGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. + theta * x)**(-1. / max(theta,1e-6))
def gumbelGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
return (-np.log(x))**theta
def gumbelGeneratorInvert(x, theta):
if len(theta) > 1:
theta = theta[0]
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
if (x < 1 and theta != 1):
raise(ValueError("The inverse Gumbel generator cannot be evaluated for negative input and theta > 1"))
return np.exp(-np.power(x,np.divide(1, theta)))
def frankGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -np.log((np.exp(-theta[0] * x) - 1) / (np.exp(-theta[0]) - 1))
def frankGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -1. / theta * np.log(1. + np.exp(-x) * (np.exp(-theta) - 1.))
def joeGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return -np.log(1. - (1. - x)**theta)
def joeGeneratorInvert(x, theta):
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return 1. - (1. - np.exp(-x))**(1. / max(theta,1e-6))
def aliMikhailHaqGenerator(x, theta):
boundsConditions(x)
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return np.log((1. - theta * (1. - x)) / x)
def aliMikhailHaqGeneratorInvert(x, theta):
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return (1. - theta) / (np.exp(x) - theta)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file contains the generators and their inverses for common archimedean copulas.
"""
import numpy as np
def boundsConditions(x):
if x < 0 or x > 1:
raise ValueError("Unable to compute generator for x equals to {}".format(x))
def claytonGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. / theta) * (x**(-theta) - 1.)
def claytonGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. + theta * x)**(-1. / max(theta,1e-6))
def gumbelGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
return (-np.log(x))**theta
def gumbelGeneratorInvert(x, theta):
if len(theta) > 1:
theta = theta[0]
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
if (x < 1 and theta != 1):
raise(ValueError("The inverse Gumbel generator cannot be evaluated for negative input and theta > 1"))
return np.exp(-np.power(x,np.divide(1, theta)))
def frankGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -np.log((np.exp(-theta[0] * x) - 1) / (np.exp(-theta[0]) - 1))
def frankGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -1. / theta * np.log(1. + np.exp(-x) * (np.exp(-theta) - 1.))
def joeGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return -np.log(1. - (1. - x)**theta)
def joeGeneratorInvert(x, theta):
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return 1. - (1. - np.exp(-x))**(1. / max(theta,1e-6))
def aliMikhailHaqGenerator(x, theta):
boundsConditions(x)
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return np.log((1. - theta * (1. - x)) / x)
def aliMikhailHaqGeneratorInvert(x, theta):
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return (1. - theta) / (np.exp(x) - theta)
| en | 0.779784 | #!/usr/bin/env python # -*- coding: utf-8 -*- This file contains the generators and their inverses for common archimedean copulas. | 3.179865 | 3 |
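# --- quick self-check (illustrative only) --------------------------------------
# For a valid parameter, each generator above and its inverse should compose to
# the identity on (0, 1); the import path follows the row's file name and the
# test values are arbitrary. Note that frankGenerator indexes theta, so it is
# passed a one-element list here.
import numpy as np
from pycopula.archimedean_generators import (
    claytonGenerator, claytonGeneratorInvert,
    frankGenerator, frankGeneratorInvert,
    joeGenerator, joeGeneratorInvert,
)

x, theta = 0.3, 2.0
assert np.isclose(claytonGeneratorInvert(claytonGenerator(x, theta), theta), x)
assert np.isclose(joeGeneratorInvert(joeGenerator(x, theta), theta), x)
assert np.isclose(frankGeneratorInvert(frankGenerator(x, [theta]), theta), x)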
app/admin/__init__.py | blackboard/BBDN-Base-Python-Flask | 0 | 9475 | <filename>app/admin/__init__.py<gh_stars>0
"""
"""
from admin import routes
def init_app(app):
"""
    :param app: the Flask application instance to register the admin routes on
    :return: None
"""
routes.init_app(app)
| <filename>app/admin/__init__.py<gh_stars>0
"""
"""
from admin import routes
def init_app(app):
"""
    :param app: the Flask application instance to register the admin routes on
    :return: None
"""
routes.init_app(app)
| en | 0.295678 | :param app: :return: | 1.782756 | 2 |
output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_pattern_2_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 9476 | <filename>output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_pattern_2_xsd/__init__.py
from output.models.nist_data.list_pkg.decimal.schema_instance.nistschema_sv_iv_list_decimal_pattern_2_xsd.nistschema_sv_iv_list_decimal_pattern_2 import NistschemaSvIvListDecimalPattern2
__all__ = [
"NistschemaSvIvListDecimalPattern2",
]
| <filename>output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_pattern_2_xsd/__init__.py
from output.models.nist_data.list_pkg.decimal.schema_instance.nistschema_sv_iv_list_decimal_pattern_2_xsd.nistschema_sv_iv_list_decimal_pattern_2 import NistschemaSvIvListDecimalPattern2
__all__ = [
"NistschemaSvIvListDecimalPattern2",
]
| none | 1 | 1.048786 | 1 |
|
fem/fem.py | Pengeace/DGP-PDE-FEM | 7 | 9477 | <reponame>Pengeace/DGP-PDE-FEM
import numpy as np
import pyamg
from scipy import sparse
from scipy.spatial import Delaunay
from linsolver import sparse_solver
from triangulation.delaunay import delaunay
class Element:
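    """A single linear triangular element: stores its three node coordinates, the
    affine map from the reference triangle, and assembles the local 3x3 stiffness
    matrix and load vector consumed by FiniteElement."""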
def __init__(self, points, global_indexes, fem):
self.points = np.array(points)
self.global_indexes = global_indexes
self.fem = fem
self.reference_triangle = np.array([[0, 0], [1., 0], [0, 1.]])
self.reference_grad = np.array([[-1., -1], [1., 0], [0, 1.]])
def perform_calculation(self):
self._calculate_transform()
self._calculate_stiffness_matrix()
self._calulate_load_vector()
def _calculate_transform(self):
reference_coord = np.array([self.reference_triangle[:, 0], self.reference_triangle[:, 1], [1] * 3])
transformed_coord = np.array([self.points[:, 0], self.points[:, 1], [1] * 3])
trans = np.dot(transformed_coord, np.linalg.inv(reference_coord))
self.transform_matrix = trans[0:-1, 0:-1]
self.area = abs(np.linalg.det(self.transform_matrix)) / 2
def _calculate_stiffness_matrix(self):
transform_matrix_inv = np.linalg.inv(self.transform_matrix)
self.element_stiffness_matrix = np.zeros((3, 3))
for row in range(3):
for col in range(3):
part_u_left_grad = np.dot(np.dot(self.fem.A, transform_matrix_inv.T), self.reference_grad[row])
part_u_right_grad = np.dot(transform_matrix_inv.T, self.reference_grad[col])
part_u_grad = self.area * np.dot(part_u_left_grad, part_u_right_grad)
part_u = (self.area / 6.0) if row == col else (self.area / 12.0)
self.element_stiffness_matrix[row, col] = part_u_grad + self.fem.q * part_u
def _calulate_load_vector(self):
mean_f = np.mean([self.fem.get_func_value(x) for x in self.points])
self.element_load_vector = np.array([mean_f * self.area / 3] * 3)
class FiniteElement:
"""
    Finite Element Method to solve a 2D elliptic partial differential equation of the form:
div(A grad(u)) + q u = func
"""
def __init__(self, points, boundaries, A, q, func, slow_solver=True):
self.points = np.array(points)
self.dirichlet_boundaries = np.array(boundaries)
self.A = A
self.q = q
self.f = func
self.slow_solver = slow_solver
self.triangles = []
self.point_num = len(points)
def solve(self):
if len(self.triangles) == 0:
self._get_mesh()
self._process_each_element()
self._calculate_global_stiffness_matrix()
self._calulate_global_load_vector()
self._deal_with_dirichlet_bound()
self._solve_linear_equations()
def update_border_and_func(self, boundaries, func):
self.dirichlet_boundaries = np.array(boundaries)
self.f = func
def get_func_value(self, x):
if isinstance(self.f, dict):
return self.f[tuple(x)]
else:
return self.f(x)
def _get_mesh(self):
if self.slow_solver:
self.triangles = delaunay(self.points)
else:
triangulation = Delaunay(self.points)
self.triangles = triangulation.simplices
def _process_each_element(self):
self.elements = []
for tri in self.triangles:
ele = Element(points=[self.points[v] for v in tri], global_indexes=tri, fem=self)
ele.perform_calculation()
self.elements.append(ele)
def _calculate_global_stiffness_matrix(self):
self.global_stiffness_matrix_row = []
self.global_stiffness_matrix_col = []
self.global_stiffness_matrix_data = []
boundary_indexes = set(self.dirichlet_boundaries[:, 0].astype('int'))
for ele in self.elements:
for row in range(3):
if ele.global_indexes[row] not in boundary_indexes:
for col in range(3):
self.global_stiffness_matrix_row.append(ele.global_indexes[row])
self.global_stiffness_matrix_col.append(ele.global_indexes[col])
self.global_stiffness_matrix_data.append(ele.element_stiffness_matrix[row, col])
def _calulate_global_load_vector(self):
self.global_load_vector = np.zeros(self.point_num)
for ele in self.elements:
for v in range(3):
self.global_load_vector[ele.global_indexes[v]] += ele.element_load_vector[v]
def _deal_with_dirichlet_bound(self):
for index, val in self.dirichlet_boundaries:
index = int(index)
self.global_stiffness_matrix_row.append(index)
self.global_stiffness_matrix_col.append(index)
self.global_stiffness_matrix_data.append(1)
self.global_load_vector[index] = val
def _solve_linear_equations(self):
if not self.slow_solver:
self.global_stiffness_matrix_csr = sparse.coo_matrix((self.global_stiffness_matrix_data, (
self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
self.solution = pyamg.solve(self.global_stiffness_matrix_csr, self.global_load_vector, verb=False,
tol=1e-10)
else:
global_stiffness_sparse = [np.array(self.global_stiffness_matrix_row),
np.array(self.global_stiffness_matrix_col),
np.array(self.global_stiffness_matrix_data)]
self.solution = sparse_solver.sparse_gauss_seidel(global_stiffness_sparse, self.global_load_vector,
sparse_input=True)
            ## these solver methods are for testing
# self.global_stiffness = sparse.coo_matrix((self.global_stiffness_matrix_data, (
# self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
# self.solution = linsolver.jacobi(self.global_stiffness.toarray(), self.global_load_vector)
# self.solution = linsolver.gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector)
# self.solution = sparse_solver.sparse_jacobi(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
# self.solution = sparse_solver.sparse_gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
if isinstance(self.solution, str):
print("The inputs for linear solver have problems.")
| import numpy as np
import pyamg
from scipy import sparse
from scipy.spatial import Delaunay
from linsolver import sparse_solver
from triangulation.delaunay import delaunay
class Element:
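    """A single linear triangular element: stores its three node coordinates, the
    affine map from the reference triangle, and assembles the local 3x3 stiffness
    matrix and load vector consumed by FiniteElement."""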
def __init__(self, points, global_indexes, fem):
self.points = np.array(points)
self.global_indexes = global_indexes
self.fem = fem
self.reference_triangle = np.array([[0, 0], [1., 0], [0, 1.]])
self.reference_grad = np.array([[-1., -1], [1., 0], [0, 1.]])
def perform_calculation(self):
self._calculate_transform()
self._calculate_stiffness_matrix()
self._calulate_load_vector()
def _calculate_transform(self):
reference_coord = np.array([self.reference_triangle[:, 0], self.reference_triangle[:, 1], [1] * 3])
transformed_coord = np.array([self.points[:, 0], self.points[:, 1], [1] * 3])
trans = np.dot(transformed_coord, np.linalg.inv(reference_coord))
self.transform_matrix = trans[0:-1, 0:-1]
self.area = abs(np.linalg.det(self.transform_matrix)) / 2
def _calculate_stiffness_matrix(self):
transform_matrix_inv = np.linalg.inv(self.transform_matrix)
self.element_stiffness_matrix = np.zeros((3, 3))
for row in range(3):
for col in range(3):
part_u_left_grad = np.dot(np.dot(self.fem.A, transform_matrix_inv.T), self.reference_grad[row])
part_u_right_grad = np.dot(transform_matrix_inv.T, self.reference_grad[col])
part_u_grad = self.area * np.dot(part_u_left_grad, part_u_right_grad)
part_u = (self.area / 6.0) if row == col else (self.area / 12.0)
self.element_stiffness_matrix[row, col] = part_u_grad + self.fem.q * part_u
def _calulate_load_vector(self):
mean_f = np.mean([self.fem.get_func_value(x) for x in self.points])
self.element_load_vector = np.array([mean_f * self.area / 3] * 3)
class FiniteElement:
"""
    Finite Element Method to solve a 2D elliptic partial differential equation of the form:
div(A grad(u)) + q u = func
"""
def __init__(self, points, boundaries, A, q, func, slow_solver=True):
self.points = np.array(points)
self.dirichlet_boundaries = np.array(boundaries)
self.A = A
self.q = q
self.f = func
self.slow_solver = slow_solver
self.triangles = []
self.point_num = len(points)
def solve(self):
if len(self.triangles) == 0:
self._get_mesh()
self._process_each_element()
self._calculate_global_stiffness_matrix()
self._calulate_global_load_vector()
self._deal_with_dirichlet_bound()
self._solve_linear_equations()
def update_border_and_func(self, boundaries, func):
self.dirichlet_boundaries = np.array(boundaries)
self.f = func
def get_func_value(self, x):
if isinstance(self.f, dict):
return self.f[tuple(x)]
else:
return self.f(x)
def _get_mesh(self):
if self.slow_solver:
self.triangles = delaunay(self.points)
else:
triangulation = Delaunay(self.points)
self.triangles = triangulation.simplices
def _process_each_element(self):
self.elements = []
for tri in self.triangles:
ele = Element(points=[self.points[v] for v in tri], global_indexes=tri, fem=self)
ele.perform_calculation()
self.elements.append(ele)
def _calculate_global_stiffness_matrix(self):
self.global_stiffness_matrix_row = []
self.global_stiffness_matrix_col = []
self.global_stiffness_matrix_data = []
boundary_indexes = set(self.dirichlet_boundaries[:, 0].astype('int'))
for ele in self.elements:
for row in range(3):
if ele.global_indexes[row] not in boundary_indexes:
for col in range(3):
self.global_stiffness_matrix_row.append(ele.global_indexes[row])
self.global_stiffness_matrix_col.append(ele.global_indexes[col])
self.global_stiffness_matrix_data.append(ele.element_stiffness_matrix[row, col])
def _calulate_global_load_vector(self):
self.global_load_vector = np.zeros(self.point_num)
for ele in self.elements:
for v in range(3):
self.global_load_vector[ele.global_indexes[v]] += ele.element_load_vector[v]
def _deal_with_dirichlet_bound(self):
for index, val in self.dirichlet_boundaries:
index = int(index)
self.global_stiffness_matrix_row.append(index)
self.global_stiffness_matrix_col.append(index)
self.global_stiffness_matrix_data.append(1)
self.global_load_vector[index] = val
def _solve_linear_equations(self):
if not self.slow_solver:
self.global_stiffness_matrix_csr = sparse.coo_matrix((self.global_stiffness_matrix_data, (
self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
self.solution = pyamg.solve(self.global_stiffness_matrix_csr, self.global_load_vector, verb=False,
tol=1e-10)
else:
global_stiffness_sparse = [np.array(self.global_stiffness_matrix_row),
np.array(self.global_stiffness_matrix_col),
np.array(self.global_stiffness_matrix_data)]
self.solution = sparse_solver.sparse_gauss_seidel(global_stiffness_sparse, self.global_load_vector,
sparse_input=True)
## these solver methods are for test
# self.global_stiffness = sparse.coo_matrix((self.global_stiffness_matrix_data, (
# self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr()
# self.solution = linsolver.jacobi(self.global_stiffness.toarray(), self.global_load_vector)
# self.solution = linsolver.gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector)
# self.solution = sparse_solver.sparse_jacobi(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
# self.solution = sparse_solver.sparse_gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False)
if isinstance(self.solution, str):
print("The inputs for linear solver have problems.") | en | 0.444247 | Finite Element Method to solve the 2D Elliptic Partial Differentiation differential Equation with below form: div(A grad(u)) + q u = func ## these solver methods are for test # self.global_stiffness = sparse.coo_matrix((self.global_stiffness_matrix_data, ( # self.global_stiffness_matrix_row, self.global_stiffness_matrix_col))).tocsr() # self.solution = linsolver.jacobi(self.global_stiffness.toarray(), self.global_load_vector) # self.solution = linsolver.gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector) # self.solution = sparse_solver.sparse_jacobi(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False) # self.solution = sparse_solver.sparse_gauss_seidel(self.global_stiffness.toarray(), self.global_load_vector, sparse_input=False) | 2.342346 | 2 |
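# --- usage sketch (illustrative only) -------------------------------------------
# How the FiniteElement class above might be driven on a tiny mesh, assuming the
# repository root (with the fem, linsolver and triangulation packages) is on the
# Python path; the unit-square nodes, identity diffusion matrix and constant
# source term below are invented for this sketch.
import numpy as np
from fem.fem import FiniteElement

points = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.5, 0.5]]
boundaries = [[0, 0.0], [1, 0.0], [2, 0.0], [3, 0.0]]   # Dirichlet u = 0 at the corners
fem = FiniteElement(points, boundaries, A=np.eye(2), q=0.0, func=lambda x: 1.0)
fem.solve()
print(fem.solution)                                      # one nodal value per input point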
custom_components/tahoma/climate_devices/dimmer_exterior_heating.py | MatthewFlamm/ha-tahoma | 0 | 9478 | <gh_stars>0
"""Support for Atlantic Electrical Heater IO controller."""
import logging
from typing import List
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from ..coordinator import TahomaDataUpdateCoordinator
from ..tahoma_entity import TahomaEntity
_LOGGER = logging.getLogger(__name__)
COMMAND_GET_LEVEL = "getLevel"
COMMAND_SET_LEVEL = "setLevel"
CORE_LEVEL_STATE = "core:LevelState"
class DimmerExteriorHeating(TahomaEntity, ClimateEntity):
"""Representation of TaHoma IO Atlantic Electrical Heater."""
def __init__(self, device_url: str, coordinator: TahomaDataUpdateCoordinator):
"""Init method."""
super().__init__(device_url, coordinator)
self._saved_level = 100 - self.select_state(CORE_LEVEL_STATE)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def min_temp(self) -> float:
"""Return minimum percentage."""
return 0
@property
def max_temp(self) -> float:
"""Return maximum percentage."""
return 100
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return 100 - self.select_state(CORE_LEVEL_STATE)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
level = kwargs.get(ATTR_TEMPERATURE)
if level is None:
return
await self.async_execute_command(COMMAND_SET_LEVEL, 100 - int(level))
await self.async_execute_command(COMMAND_GET_LEVEL)
@property
    def hvac_mode(self) -> str:
        """Return hvac operation, i.e. heat or cool mode."""
if self.select_state(CORE_LEVEL_STATE) == 100:
return HVAC_MODE_OFF
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_OFF, HVAC_MODE_HEAT]
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
level = 0
if hvac_mode == HVAC_MODE_HEAT:
level = self._saved_level
else:
self._saved_level = self.target_temperature
await self.async_execute_command(COMMAND_SET_LEVEL, 100 - int(level))
await self.async_execute_command(COMMAND_GET_LEVEL)
| """Support for Atlantic Electrical Heater IO controller."""
elit/components/mtl/attn/joint_encoder.py | emorynlp/stem-cell-hypothesis | 4 | 9479 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-03-02 13:32
from typing import Optional, Union, Dict, Any
import torch
from torch import nn
from transformers import PreTrainedTokenizer
from elit.components.mtl.attn.attn import TaskAttention
from elit.components.mtl.attn.transformer import JointEncoder
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbeddingModule, ContextualWordEmbedding
from elit.layers.scalar_mix import ScalarMixWithDropoutBuilder
from elit.layers.transformers.utils import pick_tensor_for_each_token
class JointContextualWordEmbeddingModule(ContextualWordEmbeddingModule):
def __init__(self, field: str, transformer: str, transformer_tokenizer: PreTrainedTokenizer, average_subwords=False,
scalar_mix: Union[ScalarMixWithDropoutBuilder, int] = None, word_dropout=None,
max_sequence_length=None, ret_raw_hidden_states=False, transformer_args: Dict[str, Any] = None,
trainable=True, training=True) -> None:
super().__init__(field, transformer, transformer_tokenizer, average_subwords, scalar_mix, word_dropout,
max_sequence_length, ret_raw_hidden_states, transformer_args, trainable, training)
self.adapter: TaskAttention = None
def forward(self, batch: dict, mask=None, **kwargs):
input_ids: torch.LongTensor = batch[f'{self.field}_input_ids']
if self.max_sequence_length and input_ids.size(-1) > self.max_sequence_length:
raise NotImplementedError('Sentence length exceeded and sliding window has not been implemented yet')
token_span: torch.LongTensor = batch.get(f'{self.field}_token_span', None)
token_type_ids: torch.LongTensor = batch.get(f'{self.field}_token_type_ids', None)
attention_mask = input_ids.ne(0)
if self.word_dropout:
input_ids = self.word_dropout(input_ids)
# noinspection PyTypeChecker
transformer: JointEncoder = self.transformer
encoder_outputs = transformer(input_ids, attention_mask, token_type_ids)
outputs = dict()
for task_name, encoder_output in encoder_outputs.items():
encoder_output = encoder_output[0]
outputs[task_name] = pick_tensor_for_each_token(encoder_output, token_span, self.average_subwords)
return outputs
class JointContextualWordEmbedding(ContextualWordEmbedding):
def module(self, training=True, **kwargs) -> Optional[nn.Module]:
return JointContextualWordEmbeddingModule(self.field,
self.transformer,
self._transformer_tokenizer,
self.average_subwords,
self.scalar_mix,
self.word_dropout,
self.max_sequence_length,
self.ret_raw_hidden_states,
self.transformer_args,
self.trainable,
training=training)
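# --- Illustrative sketch (added; not part of the original module) ---
# forward() above returns a dict mapping each task name to a token-aligned
# hidden-state tensor. A multi-task setup would typically route each entry to
# its own head; the task names and sizes below are made-up placeholders.
import torch
from torch import nn

_hidden_size, _num_labels = 768, 17  # assumed sizes for the sketch only
_heads = nn.ModuleDict({'pos': nn.Linear(_hidden_size, _num_labels),
                        'ner': nn.Linear(_hidden_size, _num_labels)})
_fake_outputs = {name: torch.zeros(2, 10, _hidden_size) for name in _heads}  # stand-in for forward(batch)
_logits = {name: _heads[name](hidden) for name, hidden in _fake_outputs.items()}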
simulation/sensors/__init__.py | salinsiim/petssa-simulation | 0 | 9480 | from sensors.sensors import sense_characteristics, sense_pedestrians | from sensors.sensors import sense_characteristics, sense_pedestrians | none | 1 | 1.074546 | 1 |
jaxrl/agents/sac_v1/sac_v1_learner.py | anuragajay/jaxrl | 157 | 9481 | """Implementations of algorithms for continuous control."""
import functools
from typing import Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import optax
from jaxrl.agents.sac import temperature
from jaxrl.agents.sac.actor import update as update_actor
from jaxrl.agents.sac.critic import target_update
from jaxrl.agents.sac_v1.critic import update_q, update_v
from jaxrl.datasets import Batch
from jaxrl.networks import critic_net, policies
from jaxrl.networks.common import InfoDict, Model, PRNGKey
@functools.partial(jax.jit, static_argnames=('update_target'))
def _update_jit(
rng: PRNGKey, actor: Model, critic: Model, value: Model,
target_value: Model, temp: Model, batch: Batch, discount: float,
tau: float, target_entropy: float, update_target: bool
) -> Tuple[PRNGKey, Model, Model, Model, Model, Model, InfoDict]:
new_critic, critic_info = update_q(critic, target_value, batch, discount)
rng, key = jax.random.split(rng)
new_actor, actor_info = update_actor(key, actor, new_critic, temp, batch)
rng, key = jax.random.split(rng)
new_value, value_info = update_v(key, new_actor, new_critic, value, temp,
batch, True)
if update_target:
new_target_value = target_update(new_value, target_value, tau)
else:
new_target_value = target_value
new_temp, alpha_info = temperature.update(temp, actor_info['entropy'],
target_entropy)
return rng, new_actor, new_critic, new_value, new_target_value, new_temp, {
**critic_info,
**value_info,
**actor_info,
**alpha_info
}
class SACV1Learner(object):
def __init__(self,
seed: int,
observations: jnp.ndarray,
actions: jnp.ndarray,
actor_lr: float = 3e-4,
value_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
target_update_period: int = 1,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1801.01290
"""
action_dim = actions.shape[-1]
if target_entropy is None:
self.target_entropy = -action_dim / 2
else:
self.target_entropy = target_entropy
self.tau = tau
self.target_update_period = target_update_period
self.discount = discount
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_def = policies.NormalTanhPolicy(hidden_dims, action_dim)
actor = Model.create(actor_def,
inputs=[actor_key, observations],
tx=optax.adam(learning_rate=actor_lr))
critic_def = critic_net.DoubleCritic(hidden_dims)
critic = Model.create(critic_def,
inputs=[critic_key, observations, actions],
tx=optax.adam(learning_rate=critic_lr))
value_def = critic_net.ValueCritic(hidden_dims)
value = Model.create(value_def,
inputs=[critic_key, observations],
tx=optax.adam(learning_rate=value_lr))
target_value = Model.create(value_def,
inputs=[critic_key, observations])
temp = Model.create(temperature.Temperature(init_temperature),
inputs=[temp_key],
tx=optax.adam(learning_rate=temp_lr))
self.actor = actor
self.critic = critic
self.value = value
self.target_value = target_value
self.temp = temp
self.rng = rng
self.step = 1
def sample_actions(self,
observations: np.ndarray,
temperature: float = 1.0) -> jnp.ndarray:
rng, actions = policies.sample_actions(self.rng, self.actor.apply_fn,
self.actor.params, observations,
temperature)
self.rng = rng
actions = np.asarray(actions)
return np.clip(actions, -1, 1)
def update(self, batch: Batch) -> InfoDict:
self.step += 1
new_rng, new_actor, new_critic, new_value, new_target_value, new_temp, info = _update_jit(
self.rng, self.actor, self.critic, self.value, self.target_value,
self.temp, batch, self.discount, self.tau, self.target_entropy,
self.step % self.target_update_period == 0)
self.rng = new_rng
self.actor = new_actor
self.critic = new_critic
self.value = new_value
self.target_value = new_target_value
self.temp = new_temp
return info
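# --- Illustrative usage (added; not part of the original file) ---
# Minimal construction/sampling sketch. The observation/action shapes are made
# up, and replay-buffer handling is omitted; update() expects a Batch from
# jaxrl.datasets once real transitions are available.
if __name__ == '__main__':
    obs_dim, act_dim = 11, 3
    dummy_obs = np.zeros((1, obs_dim), dtype=np.float32)
    dummy_act = np.zeros((1, act_dim), dtype=np.float32)
    learner = SACV1Learner(seed=0, observations=dummy_obs, actions=dummy_act)
    action = learner.sample_actions(dummy_obs)  # already clipped to [-1, 1]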
rbc/libfuncs.py | plures/rbc | 1 | 9482 | """Collections of library function names.
"""
class Library:
"""Base class for a collection of library function names.
"""
@staticmethod
def get(libname, _cache={}):
if libname in _cache:
return _cache[libname]
if libname == 'stdlib':
r = Stdlib()
elif libname == 'stdio':
r = Stdio()
elif libname == 'm':
r = Mlib()
elif libname == 'libdevice':
r = Libdevice()
elif libname == 'nvvm':
r = NVVMIntrinsics()
elif libname == 'llvm':
r = LLVMIntrinsics()
elif libname == 'heavydb':
r = HeavyDB()
else:
raise ValueError(f'Unknown library {libname}')
_cache[libname] = r
return r
def __contains__(self, fname):
return self.check(fname)
def check(self, fname):
"""
Return True if library contains a function with given name.
"""
if fname in self._function_names:
return True
for func in self._function_names:
if func.endswith('.*') and fname.startswith(func[:-2]):
return True
return False
class HeavyDB(Library):
name = 'heavydb'
_function_names = list('''
allocate_varlen_buffer set_output_row_size
TableFunctionManager_error_message TableFunctionManager_set_output_row_size
table_function_error
'''.strip().split())
class Stdlib(Library):
"""
Reference: http://www.cplusplus.com/reference/cstdlib/
"""
name = 'stdlib'
_function_names = list(''' atof atoi atol atoll strtod strtof strtol strtold strtoll strtoul
strtoull rand srand calloc free malloc realloc abort atexit
at_quick_exit exit getenv quick_exit system bsearch qsort abs div
labs ldiv llabs lldiv mblen mbtowc wctomb mbstowcs wcstombs '''.strip().split())
class Stdio(Library):
"""
Reference: http://www.cplusplus.com/reference/cstdio/
"""
name = 'stdio'
_function_names = list(''' remove rename tmpfile tmpnam fclose fflush fopen freopen setbuf
setvbuf fprintf fscanf printf scanf snprintf sprintf sscanf
vfprintf vfscanf vprintf vscanf vsnprintf vsprintf vsscanf fgetc
fgets fputc fputs getc getchar gets putc putchar puts ungetc fread
fwrite fgetpos fseek fsetpos ftell rewind clearerr feof ferror
perror '''.strip().split())
class Mlib(Library):
"""
References:
https://www.gnu.org/software/libc/manual/html_node/Mathematics.html
https://en.cppreference.com/w/cpp/header/cmath
"""
name = 'm'
_function_names = list('''sin sinf sinl cos cosf cosl tan tanf tanl sincos sincosf sincosl
csin csinf csinl ccos ccosf ccosl ctan ctanf ctanl asin asinf
asinl acos acosf acosl atan atanf atanl atan2 atan2f atan2l casin
casinf casinl cacos cacosf cacosl catan catanf catanl exp expf
expl exp2 exp2f exp2l exp10 exp10f exp10l log logf logl log2 log2f
log2l log10 log10f log10l logb logbf logbl ilogb ilogbf ilogbl pow
powf powl sqrt sqrtf sqrtl cbrt cbrtf cbrtl hypot hypotf hypotl
expm1 expm1f expm1l log1p log1pf log1pl clog clogf clogl clog10
clog10f clog10l csqrt csqrtf csqrtl cpow cpowf cpowl sinh sinhf
sinhl cosh coshf coshl tanh tanhf tanhl csinh csinhf csinhl ccosh
ccoshf ccoshl ctanh ctanhf ctanhl asinh asinhf asinhl acosh acoshf
acoshl atanh atanhf atanhl casinh casinhf casinhl cacosh cacoshf
cacoshl catanh catanhf catanhl erf erff erfl erfc erfcf erfcl
lgamma lgammaf lgammal tgamma tgammaf tgammal lgamma_r lgammaf_r
lgammal_r gamma gammaf gammal j0 j0f j0l j1 j1f j1l jn jnf jnl y0
y0f y0l y1 y1f y1l yn ynf ynl rand srand rand_r random srandom
initstate setstate random_r srandom_r initstate_r setstate_r
drand48 erand48 lrand48 nrand48 mrand48 jrand48 srand48 seed48
lcong48 drand48_r erand48_r lrand48_r nrand48_r mrand48_r
jrand48_r srand48_r seed48_r lcong48_r abs labs llabs fabs fabsf
fabsl cabs cabsf cabsl frexp frexpf frexpl ldexp ldexpf ldexpl
scalb scalbf scalbl scalbn scalbnf scalbnl significand
significandf significandl ceil ceilf ceill floor floorf floorl
trunc truncf truncl rint rintf rintl nearbyint nearbyintf
nearbyintl round roundf roundl roundeven roundevenf roundevenl
lrint lrintf lrintl lround lroundf lroundl llround llroundf
llroundl fromfp fromfpf fromfpl ufromfp ufromfpf ufromfpl fromfpx
fromfpxf fromfpxl ufromfpx ufromfpxf ufromfpxl modf modff modfl
fmod fmodf fmodl remainder remainderf remainderl drem dremf dreml
copysign copysignf copysignl signbit signbitf signbitl nextafter
nextafterf nextafterl nexttoward nexttowardf nexttowardl nextup
nextupf nextupl nextdown nextdownf nextdownl nan nanf nanl
canonicalize canonicalizef canonicalizel getpayload getpayloadf
getpayloadl setpayload setpayloadf setpayloadl setpayloadsig
setpayloadsigf setpayloadsigl isgreater isgreaterequal isless
islessequal islessgreater isunordered iseqsig totalorder
totalorderf totalorderl totalordermag totalorderf totalorderl fmin
fminf fminl fmax fmaxf fmaxl fminmag fminmagf fminmagl fmaxmag
fmaxmagf fmaxmagl fdim fdimf fdiml fma fmaf fmal fadd faddf faddl
fsub fsubf fsubl fmul fmulf fmull fdiv fdivf fdivl llrint llrintf
llrintl'''.strip().split())
def drop_suffix(f):
s = f.rsplit('.', 1)[-1]
if s in ['p0i8', 'f64', 'f32', 'i1', 'i8', 'i16', 'i32', 'i64', 'i128']:
f = f[:-len(s)-1]
return drop_suffix(f)
return f
def get_llvm_name(f, prefix='llvm.'):
"""Return normalized name of a llvm intrinsic name.
"""
if f.startswith(prefix):
return drop_suffix(f[len(prefix):])
return f
class LLVMIntrinsics(Library):
"""LLVM intrinsic function names with prefix `llvm.` removed.
Reference: https://llvm.org/docs/LangRef.html#intrinsic-functions
"""
name = 'llvm'
def check(self, fname):
if fname.startswith('llvm.'):
return Library.check(self, get_llvm_name(fname))
return False
_function_names = list(''' va_start va_end va_copy gcroot gcread gcwrite returnaddress
addressofreturnaddress sponentry frameaddress stacksave
stackrestore get.dynamic.area.offset prefetch pcmarker
readcyclecounter clear_cache instrprof.increment
instrprof.increment.step instrprof.value.profile thread.pointer
call.preallocated.setup call.preallocated.arg
call.preallocated.teardown abs smax smin umax umin memcpy
memcpy.inline memmove sqrt powi sin cos pow exp exp2 log log10
log2 fma fabs minnum maxnum minimum maximum copysign floor ceil
trunc rint nearbyint round roundeven lround llround lrint llrint
ctpop ctlz cttz fshl fshr sadd.with.overflow uadd.with.overflow
ssub.with.overflow usub.with.overflow smul.with.overflow
umul.with.overflow sadd.sat uadd.sat ssub.sat usub.sat sshl.sat
ushl.sat smul.fix umul.fix smul.fix.sat umul.fix.sat sdiv.fix
udiv.fix sdiv.fix.sat udiv.fix.sat canonicalize fmuladd
set.loop.iterations test.set.loop.iterations loop.decrement.reg
loop.decrement vector.reduce.add vector.reduce.fadd
vector.reduce.mul vector.reduce.fmul vector.reduce.and
vector.reduce.or vector.reduce.xor vector.reduce.smax
vector.reduce.smin vector.reduce.umax vector.reduce.umin
vector.reduce.fmax vector.reduce.fmin matrix.transpose
matrix.multiply matrix.column.major.load matrix.column.major.store
convert.to.fp16 convert.from.fp16 init.trampoline
adjust.trampoline lifetime.start lifetime.end invariant.start
invariant.end launder.invariant.group strip.invariant.group
experimental.constrained.fadd experimental.constrained.fsub
experimental.constrained.fmul experimental.constrained.fdiv
experimental.constrained.frem experimental.constrained.fma
experimental.constrained.fptoui experimental.constrained.fptosi
experimental.constrained.uitofp experimental.constrained.sitofp
experimental.constrained.fptrunc experimental.constrained.fpext
experimental.constrained.fmuladd experimental.constrained.sqrt
experimental.constrained.pow experimental.constrained.powi
experimental.constrained.sin experimental.constrained.cos
experimental.constrained.exp experimental.constrained.exp2
experimental.constrained.log experimental.constrained.log10
experimental.constrained.log2 experimental.constrained.rint
experimental.constrained.lrint experimental.constrained.llrint
experimental.constrained.nearbyint experimental.constrained.maxnum
experimental.constrained.minnum experimental.constrained.maximum
experimental.constrained.minimum experimental.constrained.ceil
experimental.constrained.floor experimental.constrained.round
experimental.constrained.roundeven experimental.constrained.lround
experimental.constrained.llround experimental.constrained.trunc
experimental.gc.statepoint experimental.gc.result experimental.gc.relocate
experimental.gc.get.pointer.base experimental.gc.get.pointer.offset
experimental.vector.reduce.add.* experimental.vector.reduce.fadd.*
experimental.vector.reduce.mul.* experimental.vector.reduce.fmul.*
experimental.vector.reduce.and.* experimental.vector.reduce.or.*
experimental.vector.reduce.xor.* experimental.vector.reduce.smax.*
experimental.vector.reduce.smin.* experimental.vector.reduce.umax.*
experimental.vector.reduce.umin.* experimental.vector.reduce.fmax.*
experimental.vector.reduce.fmin.*
flt.rounds var.annotation ptr.annotation annotation
codeview.annotation trap debugtrap stackprotector stackguard
objectsize expect expect.with.probability assume ssa_copy
type.test type.checked.load donothing experimental.deoptimize
experimental.guard experimental.widenable.condition load.relative
sideeffect is.constant ptrmask vscale
memcpy.element.unordered.atomic memmove.element.unordered.atomic
memset.element.unordered.atomic objc.autorelease
objc.autoreleasePoolPop objc.autoreleasePoolPush
objc.autoreleaseReturnValue objc.copyWeak objc.destroyWeak
objc.initWeak objc.loadWeak objc.loadWeakRetained objc.moveWeak
objc.release objc.retain objc.retainAutorelease
objc.retainAutoreleaseReturnValue
objc.retainAutoreleasedReturnValue objc.retainBlock
objc.storeStrong objc.storeWeak preserve.array.access.index
preserve.union.access.index preserve.struct.access.index
masked.store.* memset'''.strip().split())
class NVVMIntrinsics(Library):
"""NVVM intrinsic function names with prefix `llvm.` removed.
Reference: https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#intrinsic-functions
"""
name = 'nvvm'
def check(self, fname):
if fname.startswith('llvm.'):
return Library.check(self, get_llvm_name(fname))
return False
_function_names = list(''' memcpy memmove memset sqrt fma bswap ctpop ctlz cttz fmuladd
convert.to.fp16.f32 convert.from.fp16.f32 convert.to.fp16
convert.from.fp16 lifetime.start lifetime.end invariant.start
invariant.end var.annotation ptr.annotation annotation expect
donothing '''.strip().split())
class Libdevice(Library):
"""NVIDIA libdevice function names with prefix `__nv_` removed.
Reference: https://docs.nvidia.com/cuda/libdevice-users-guide/function-desc.html#function-desc
"""
name = 'libdevice'
def check(self, fname):
if fname.startswith('__nv_'):
return Library.check(self, get_llvm_name(fname, prefix='__nv_'))
return False
_function_names = list(''' abs acos acosf acosh acoshf asin asinf asinh asinhf atan atan2
atan2f atanf atanh atanhf brev brevll byte_perm cbrt cbrtf ceil
ceilf clz clzll copysign copysignf cos cosf cosh coshf cospi
cospif dadd_rd dadd_rn dadd_ru dadd_rz ddiv_rd ddiv_rn ddiv_ru
ddiv_rz dmul_rd dmul_rn dmul_ru dmul_rz double2float_rd
double2float_rn double2float_ru double2float_rz double2hiint
double2int_rd double2int_rn double2int_ru double2int_rz
double2ll_rd double2ll_rn double2ll_ru double2ll_rz double2loint
double2uint_rd double2uint_rn double2uint_ru double2uint_rz
double2ull_rd double2ull_rn double2ull_ru double2ull_rz
double_as_longlong drcp_rd drcp_rn drcp_ru drcp_rz dsqrt_rd
dsqrt_rn dsqrt_ru dsqrt_rz erf erfc erfcf erfcinv erfcinvf erfcx
erfcxf erff erfinv erfinvf exp exp10 exp10f exp2 exp2f expf expm1
expm1f fabs fabsf fadd_rd fadd_rn fadd_ru fadd_rz fast_cosf
fast_exp10f fast_expf fast_fdividef fast_log10f fast_log2f
fast_logf fast_powf fast_sincosf fast_sinf fast_tanf fdim fdimf
fdiv_rd fdiv_rn fdiv_ru fdiv_rz ffs ffsll finitef float2half_rn
float2int_rd float2int_rn float2int_ru float2int_rz float2ll_rd
float2ll_rn float2ll_ru float2ll_rz float2uint_rd float2uint_rn
float2uint_ru float2uint_rz float2ull_rd float2ull_rn float2ull_ru
float2ull_rz float_as_int floor floorf fma fma_rd fma_rn fma_ru
fma_rz fmaf fmaf_rd fmaf_rn fmaf_ru fmaf_rz fmax fmaxf fmin fminf
fmod fmodf fmul_rd fmul_rn fmul_ru fmul_rz frcp_rd frcp_rn frcp_ru
frcp_rz frexp frexpf frsqrt_rn fsqrt_rd fsqrt_rn fsqrt_ru fsqrt_rz
fsub_rd fsub_rn fsub_ru fsub_rz hadd half2float hiloint2double
hypot hypotf ilogb ilogbf int2double_rn int2float_rd int2float_rn
int2float_ru int2float_rz int_as_float isfinited isinfd isinff
isnand isnanf j0 j0f j1 j1f jn jnf ldexp ldexpf lgamma lgammaf
ll2double_rd ll2double_rn ll2double_ru ll2double_rz ll2float_rd
ll2float_rn ll2float_ru ll2float_rz llabs llmax llmin llrint
llrintf llround llroundf log log10 log10f log1p log1pf log2 log2f
logb logbf logf longlong_as_double max min modf modff mul24
mul64hi mulhi nan nanf nearbyint nearbyintf nextafter nextafterf
normcdf normcdff normcdfinv normcdfinvf popc popcll pow powf powi
powif rcbrt rcbrtf remainder remainderf remquo remquof rhadd rint
rintf round roundf rsqrt rsqrtf sad saturatef scalbn scalbnf
signbitd signbitf sin sincos sincosf sincospi sincospif sinf sinh
sinhf sinpi sinpif sqrt sqrtf tan tanf tanh tanhf tgamma tgammaf
trunc truncf uhadd uint2double_rn uint2float_rd uint2float_rn
uint2float_ru uint2float_rz ull2double_rd ull2double_rn
ull2double_ru ull2double_rz ull2float_rd ull2float_rn ull2float_ru
ull2float_rz ullmax ullmin umax umin umul24 umul64hi umulhi urhadd
usad y0 y0f y1 y1f yn ynf '''.strip().split())
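# --- Illustrative usage (added; not part of the original module) ---
# Library.get caches one instance per name; membership checks normalize
# intrinsic names, so prefixed/suffixed symbols resolve to their base entry.
if __name__ == '__main__':
    assert 'sqrt' in Library.get('m')
    assert 'llvm.memcpy.p0i8.p0i8.i64' in Library.get('llvm')   # prefix and suffixes dropped
    assert '__nv_fast_sinf' in Library.get('libdevice')
    assert 'no_such_function' not in Library.get('stdlib')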
"""
class Library:
"""Base class for a collection of library function names.
"""
@staticmethod
def get(libname, _cache={}):
if libname in _cache:
return _cache[libname]
if libname == 'stdlib':
r = Stdlib()
elif libname == 'stdio':
r = Stdio()
elif libname == 'm':
r = Mlib()
elif libname == 'libdevice':
r = Libdevice()
elif libname == 'nvvm':
r = NVVMIntrinsics()
elif libname == 'llvm':
r = LLVMIntrinsics()
elif libname == 'heavydb':
r = HeavyDB()
else:
raise ValueError(f'Unknown library {libname}')
_cache[libname] = r
return r
def __contains__(self, fname):
return self.check(fname)
def check(self, fname):
"""
Return True if library contains a function with given name.
"""
if fname in self._function_names:
return True
for func in self._function_names:
if func.endswith('.*') and fname.startswith(func[:-2]):
return True
return False
class HeavyDB(Library):
name = 'heavydb'
_function_names = list('''
allocate_varlen_buffer set_output_row_size
TableFunctionManager_error_message TableFunctionManager_set_output_row_size
table_function_error
'''.strip().split())
class Stdlib(Library):
"""
Reference: http://www.cplusplus.com/reference/cstdlib/
"""
name = 'stdlib'
_function_names = list(''' atof atoi atol atoll strtod strtof strtol strtold strtoll strtoul
strtoull rand srand calloc free malloc realloc abort atexit
at_quick_exit exit getenv quick_exit system bsearch qsort abs div
labs ldiv llabs lldiv mblen mbtowc wctomb mbstowcs wcstombs '''.strip().split())
class Stdio(Library):
"""
Reference: http://www.cplusplus.com/reference/cstdio/
"""
name = 'stdio'
_function_names = list(''' remove rename tmpfile tmpnam fclose fflush fopen freopen setbuf
setvbuf fprintf fscanf printf scanf snprintf sprintf sscanf
vfprintf vfscanf vprintf vscanf vsnprintf vsprintf vsscanf fgetc
fgets fputc fputs getc getchar gets putc putchar puts ungetc fread
fwrite fgetpos fseek fsetpos ftell rewind clearerr feof ferror
perror '''.strip().split())
class Mlib(Library):
"""
References:
https://www.gnu.org/software/libc/manual/html_node/Mathematics.html
https://en.cppreference.com/w/cpp/header/cmath
"""
name = 'm'
_function_names = list('''sin sinf sinl cos cosf cosl tan tanf tanl sincos sincosf sincosl
csin csinf csinl ccos ccosf ccosl ctan ctanf ctanl asin asinf
asinl acos acosf acosl atan atanf atanl atan2 atan2f atan2l casin
casinf casinl cacos cacosf cacosl catan catanf catanl exp expf
expl exp2 exp2f exp2l exp10 exp10f exp10l log logf logl log2 log2f
log2l log10 log10f log10l logb logbf logbl ilogb ilogbf ilogbl pow
powf powl sqrt sqrtf sqrtl cbrt cbrtf cbrtl hypot hypotf hypotl
expm1 expm1f expm1l log1p log1pf log1pl clog clogf clogl clog10
clog10f clog10l csqrt csqrtf csqrtl cpow cpowf cpowl sinh sinhf
sinhl cosh coshf coshl tanh tanhf tanhl csinh csinhf csinhl ccosh
ccoshf ccoshl ctanh ctanhf ctanhl asinh asinhf asinhl acosh acoshf
acoshl atanh atanhf atanhl casinh casinhf casinhl cacosh cacoshf
cacoshl catanh catanhf catanhl erf erff erfl erfc erfcf erfcl
lgamma lgammaf lgammal tgamma tgammaf tgammal lgamma_r lgammaf_r
lgammal_r gamma gammaf gammal j0 j0f j0l j1 j1f j1l jn jnf jnl y0
y0f y0l y1 y1f y1l yn ynf ynl rand srand rand_r random srandom
initstate setstate random_r srandom_r initstate_r setstate_r
drand48 erand48 lrand48 nrand48 mrand48 jrand48 srand48 seed48
lcong48 drand48_r erand48_r lrand48_r nrand48_r mrand48_r
jrand48_r srand48_r seed48_r lcong48_r abs labs llabs fabs fabsf
fabsl cabs cabsf cabsl frexp frexpf frexpl ldexp ldexpf ldexpl
scalb scalbf scalbl scalbn scalbnf scalbnl significand
significandf significandl ceil ceilf ceill floor floorf floorl
trunc truncf truncl rint rintf rintl nearbyint nearbyintf
nearbyintl round roundf roundl roundeven roundevenf roundevenl
lrint lrintf lrintl lround lroundf lroundl llround llroundf
llroundl fromfp fromfpf fromfpl ufromfp ufromfpf ufromfpl fromfpx
fromfpxf fromfpxl ufromfpx ufromfpxf ufromfpxl modf modff modfl
fmod fmodf fmodl remainder remainderf remainderl drem dremf dreml
copysign copysignf copysignl signbit signbitf signbitl nextafter
nextafterf nextafterl nexttoward nexttowardf nexttowardl nextup
nextupf nextupl nextdown nextdownf nextdownl nan nanf nanl
canonicalize canonicalizef canonicalizel getpayload getpayloadf
getpayloadl setpayload setpayloadf setpayloadl setpayloadsig
setpayloadsigf setpayloadsigl isgreater isgreaterequal isless
islessequal islessgreater isunordered iseqsig totalorder
totalorderf totalorderl totalordermag totalorderf totalorderl fmin
fminf fminl fmax fmaxf fmaxl fminmag fminmagf fminmagl fmaxmag
fmaxmagf fmaxmagl fdim fdimf fdiml fma fmaf fmal fadd faddf faddl
fsub fsubf fsubl fmul fmulf fmull fdiv fdivf fdivl llrint llrintf
llrintl'''.strip().split())
def drop_suffix(f):
s = f.rsplit('.', 1)[-1]
if s in ['p0i8', 'f64', 'f32', 'i1', 'i8', 'i16', 'i32', 'i64', 'i128']:
f = f[:-len(s)-1]
return drop_suffix(f)
return f
def get_llvm_name(f, prefix='llvm.'):
"""Return normalized name of a llvm intrinsic name.
"""
if f.startswith(prefix):
return drop_suffix(f[len(prefix):])
return f
class LLVMIntrinsics(Library):
"""LLVM intrinsic function names with prefix `llvm.` removed.
Reference: https://llvm.org/docs/LangRef.html#intrinsic-functions
"""
name = 'llvm'
def check(self, fname):
if fname.startswith('llvm.'):
return Library.check(self, get_llvm_name(fname))
return False
_function_names = list(''' va_start va_end va_copy gcroot gcread gcwrite returnaddress
addressofreturnaddress sponentry frameaddress stacksave
stackrestore get.dynamic.area.offset prefetch pcmarker
readcyclecounter clear_cache instrprof.increment
instrprof.increment.step instrprof.value.profile thread.pointer
call.preallocated.setup call.preallocated.arg
call.preallocated.teardown abs smax smin umax umin memcpy
memcpy.inline memmove sqrt powi sin cos pow exp exp2 log log10
log2 fma fabs minnum maxnum minimum maximum copysign floor ceil
trunc rint nearbyint round roundeven lround llround lrint llrint
ctpop ctlz cttz fshl fshr sadd.with.overflow uadd.with.overflow
ssub.with.overflow usub.with.overflow smul.with.overflow
umul.with.overflow sadd.sat uadd.sat ssub.sat usub.sat sshl.sat
ushl.sat smul.fix umul.fix smul.fix.sat umul.fix.sat sdiv.fix
udiv.fix sdiv.fix.sat udiv.fix.sat canonicalize fmuladd
set.loop.iterations test.set.loop.iterations loop.decrement.reg
loop.decrement vector.reduce.add vector.reduce.fadd
vector.reduce.mul vector.reduce.fmul vector.reduce.and
vector.reduce.or vector.reduce.xor vector.reduce.smax
vector.reduce.smin vector.reduce.umax vector.reduce.umin
vector.reduce.fmax vector.reduce.fmin matrix.transpose
matrix.multiply matrix.column.major.load matrix.column.major.store
convert.to.fp16 convert.from.fp16 init.trampoline
adjust.trampoline lifetime.start lifetime.end invariant.start
invariant.end launder.invariant.group strip.invariant.group
experimental.constrained.fadd experimental.constrained.fsub
experimental.constrained.fmul experimental.constrained.fdiv
experimental.constrained.frem experimental.constrained.fma
experimental.constrained.fptoui experimental.constrained.fptosi
experimental.constrained.uitofp experimental.constrained.sitofp
experimental.constrained.fptrunc experimental.constrained.fpext
experimental.constrained.fmuladd experimental.constrained.sqrt
experimental.constrained.pow experimental.constrained.powi
experimental.constrained.sin experimental.constrained.cos
experimental.constrained.exp experimental.constrained.exp2
experimental.constrained.log experimental.constrained.log10
experimental.constrained.log2 experimental.constrained.rint
experimental.constrained.lrint experimental.constrained.llrint
experimental.constrained.nearbyint experimental.constrained.maxnum
experimental.constrained.minnum experimental.constrained.maximum
experimental.constrained.minimum experimental.constrained.ceil
experimental.constrained.floor experimental.constrained.round
experimental.constrained.roundeven experimental.constrained.lround
experimental.constrained.llround experimental.constrained.trunc
experimental.gc.statepoint experimental.gc.result experimental.gc.relocate
experimental.gc.get.pointer.base experimental.gc.get.pointer.offset
experimental.vector.reduce.add.* experimental.vector.reduce.fadd.*
experimental.vector.reduce.mul.* experimental.vector.reduce.fmul.*
experimental.vector.reduce.and.* experimental.vector.reduce.or.*
experimental.vector.reduce.xor.* experimental.vector.reduce.smax.*
experimental.vector.reduce.smin.* experimental.vector.reduce.umax.*
experimental.vector.reduce.umin.* experimental.vector.reduce.fmax.*
experimental.vector.reduce.fmin.*
flt.rounds var.annotation ptr.annotation annotation
codeview.annotation trap debugtrap stackprotector stackguard
objectsize expect expect.with.probability assume ssa_copy
type.test type.checked.load donothing experimental.deoptimize
experimental.guard experimental.widenable.condition load.relative
sideeffect is.constant ptrmask vscale
memcpy.element.unordered.atomic memmove.element.unordered.atomic
memset.element.unordered.atomic objc.autorelease
objc.autoreleasePoolPop objc.autoreleasePoolPush
objc.autoreleaseReturnValue objc.copyWeak objc.destroyWeak
objc.initWeak objc.loadWeak objc.loadWeakRetained objc.moveWeak
objc.release objc.retain objc.retainAutorelease
objc.retainAutoreleaseReturnValue
objc.retainAutoreleasedReturnValue objc.retainBlock
objc.storeStrong objc.storeWeak preserve.array.access.index
preserve.union.access.index preserve.struct.access.index
masked.store.* memset'''.strip().split())
class NVVMIntrinsics(Library):
"""NVVM intrinsic function names with prefix `llvm.` removed.
Reference: https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#intrinsic-functions
"""
name = 'nvvm'
def check(self, fname):
if fname.startswith('llvm.'):
return Library.check(self, get_llvm_name(fname))
return False
_function_names = list(''' memcpy memmove memset sqrt fma bswap ctpop ctlz cttz fmuladd
convert.to.fp16.f32 convert.from.fp16.f32 convert.to.fp16
convert.from.fp16 lifetime.start lifetime.end invariant.start
invariant.end var.annotation ptr.annotation annotation expect
donothing '''.strip().split())
class Libdevice(Library):
"""NVIDIA libdevice function names with prefix `__nv_` removed.
Reference: https://docs.nvidia.com/cuda/libdevice-users-guide/function-desc.html#function-desc
"""
name = 'libdevice'
def check(self, fname):
if fname.startswith('__nv_'):
return Library.check(self, get_llvm_name(fname, prefix='__nv_'))
return False
_function_names = list(''' abs acos acosf acosh acoshf asin asinf asinh asinhf atan atan2
atan2f atanf atanh atanhf brev brevll byte_perm cbrt cbrtf ceil
ceilf clz clzll copysign copysignf cos cosf cosh coshf cospi
cospif dadd_rd dadd_rn dadd_ru dadd_rz ddiv_rd ddiv_rn ddiv_ru
ddiv_rz dmul_rd dmul_rn dmul_ru dmul_rz double2float_rd
double2float_rn double2float_ru double2float_rz double2hiint
double2int_rd double2int_rn double2int_ru double2int_rz
double2ll_rd double2ll_rn double2ll_ru double2ll_rz double2loint
double2uint_rd double2uint_rn double2uint_ru double2uint_rz
double2ull_rd double2ull_rn double2ull_ru double2ull_rz
double_as_longlong drcp_rd drcp_rn drcp_ru drcp_rz dsqrt_rd
dsqrt_rn dsqrt_ru dsqrt_rz erf erfc erfcf erfcinv erfcinvf erfcx
erfcxf erff erfinv erfinvf exp exp10 exp10f exp2 exp2f expf expm1
expm1f fabs fabsf fadd_rd fadd_rn fadd_ru fadd_rz fast_cosf
fast_exp10f fast_expf fast_fdividef fast_log10f fast_log2f
fast_logf fast_powf fast_sincosf fast_sinf fast_tanf fdim fdimf
fdiv_rd fdiv_rn fdiv_ru fdiv_rz ffs ffsll finitef float2half_rn
float2int_rd float2int_rn float2int_ru float2int_rz float2ll_rd
float2ll_rn float2ll_ru float2ll_rz float2uint_rd float2uint_rn
float2uint_ru float2uint_rz float2ull_rd float2ull_rn float2ull_ru
float2ull_rz float_as_int floor floorf fma fma_rd fma_rn fma_ru
fma_rz fmaf fmaf_rd fmaf_rn fmaf_ru fmaf_rz fmax fmaxf fmin fminf
fmod fmodf fmul_rd fmul_rn fmul_ru fmul_rz frcp_rd frcp_rn frcp_ru
frcp_rz frexp frexpf frsqrt_rn fsqrt_rd fsqrt_rn fsqrt_ru fsqrt_rz
fsub_rd fsub_rn fsub_ru fsub_rz hadd half2float hiloint2double
hypot hypotf ilogb ilogbf int2double_rn int2float_rd int2float_rn
int2float_ru int2float_rz int_as_float isfinited isinfd isinff
isnand isnanf j0 j0f j1 j1f jn jnf ldexp ldexpf lgamma lgammaf
ll2double_rd ll2double_rn ll2double_ru ll2double_rz ll2float_rd
ll2float_rn ll2float_ru ll2float_rz llabs llmax llmin llrint
llrintf llround llroundf log log10 log10f log1p log1pf log2 log2f
logb logbf logf longlong_as_double max min modf modff mul24
mul64hi mulhi nan nanf nearbyint nearbyintf nextafter nextafterf
normcdf normcdff normcdfinv normcdfinvf popc popcll pow powf powi
powif rcbrt rcbrtf remainder remainderf remquo remquof rhadd rint
rintf round roundf rsqrt rsqrtf sad saturatef scalbn scalbnf
signbitd signbitf sin sincos sincosf sincospi sincospif sinf sinh
sinhf sinpi sinpif sqrt sqrtf tan tanf tanh tanhf tgamma tgammaf
trunc truncf uhadd uint2double_rn uint2float_rd uint2float_rn
uint2float_ru uint2float_rz ull2double_rd ull2double_rn
ull2double_ru ull2double_rz ull2float_rd ull2float_rn ull2float_ru
ull2float_rz ullmax ullmin umax umin umul24 umul64hi umulhi urhadd
usad y0 y0f y1 y1f yn ynf '''.strip().split())
| en | 0.290133 | Collections of library function names. Base class for a collection of library function names. Return True if library contains a function with given name. allocate_varlen_buffer set_output_row_size TableFunctionManager_error_message TableFunctionManager_set_output_row_size table_function_error Reference: http://www.cplusplus.com/reference/cstdlib/ atof atoi atol atoll strtod strtof strtol strtold strtoll strtoul strtoull rand srand calloc free malloc realloc abort atexit at_quick_exit exit getenv quick_exit system bsearch qsort abs div labs ldiv llabs lldiv mblen mbtowc wctomb mbstowcs wcstombs Reference: http://www.cplusplus.com/reference/cstdio/ remove rename tmpfile tmpnam fclose fflush fopen freopen setbuf setvbuf fprintf fscanf printf scanf snprintf sprintf sscanf vfprintf vfscanf vprintf vscanf vsnprintf vsprintf vsscanf fgetc fgets fputc fputs getc getchar gets putc putchar puts ungetc fread fwrite fgetpos fseek fsetpos ftell rewind clearerr feof ferror perror References: https://www.gnu.org/software/libc/manual/html_node/Mathematics.html https://en.cppreference.com/w/cpp/header/cmath sin sinf sinl cos cosf cosl tan tanf tanl sincos sincosf sincosl csin csinf csinl ccos ccosf ccosl ctan ctanf ctanl asin asinf asinl acos acosf acosl atan atanf atanl atan2 atan2f atan2l casin casinf casinl cacos cacosf cacosl catan catanf catanl exp expf expl exp2 exp2f exp2l exp10 exp10f exp10l log logf logl log2 log2f log2l log10 log10f log10l logb logbf logbl ilogb ilogbf ilogbl pow powf powl sqrt sqrtf sqrtl cbrt cbrtf cbrtl hypot hypotf hypotl expm1 expm1f expm1l log1p log1pf log1pl clog clogf clogl clog10 clog10f clog10l csqrt csqrtf csqrtl cpow cpowf cpowl sinh sinhf sinhl cosh coshf coshl tanh tanhf tanhl csinh csinhf csinhl ccosh ccoshf ccoshl ctanh ctanhf ctanhl asinh asinhf asinhl acosh acoshf acoshl atanh atanhf atanhl casinh casinhf casinhl cacosh cacoshf cacoshl catanh catanhf catanhl erf erff erfl erfc erfcf erfcl lgamma lgammaf lgammal tgamma tgammaf tgammal lgamma_r lgammaf_r lgammal_r gamma gammaf gammal j0 j0f j0l j1 j1f j1l jn jnf jnl y0 y0f y0l y1 y1f y1l yn ynf ynl rand srand rand_r random srandom initstate setstate random_r srandom_r initstate_r setstate_r drand48 erand48 lrand48 nrand48 mrand48 jrand48 srand48 seed48 lcong48 drand48_r erand48_r lrand48_r nrand48_r mrand48_r jrand48_r srand48_r seed48_r lcong48_r abs labs llabs fabs fabsf fabsl cabs cabsf cabsl frexp frexpf frexpl ldexp ldexpf ldexpl scalb scalbf scalbl scalbn scalbnf scalbnl significand significandf significandl ceil ceilf ceill floor floorf floorl trunc truncf truncl rint rintf rintl nearbyint nearbyintf nearbyintl round roundf roundl roundeven roundevenf roundevenl lrint lrintf lrintl lround lroundf lroundl llround llroundf llroundl fromfp fromfpf fromfpl ufromfp ufromfpf ufromfpl fromfpx fromfpxf fromfpxl ufromfpx ufromfpxf ufromfpxl modf modff modfl fmod fmodf fmodl remainder remainderf remainderl drem dremf dreml copysign copysignf copysignl signbit signbitf signbitl nextafter nextafterf nextafterl nexttoward nexttowardf nexttowardl nextup nextupf nextupl nextdown nextdownf nextdownl nan nanf nanl canonicalize canonicalizef canonicalizel getpayload getpayloadf getpayloadl setpayload setpayloadf setpayloadl setpayloadsig setpayloadsigf setpayloadsigl isgreater isgreaterequal isless islessequal islessgreater isunordered iseqsig totalorder totalorderf totalorderl totalordermag totalorderf totalorderl fmin fminf fminl fmax fmaxf fmaxl fminmag fminmagf fminmagl fmaxmag fmaxmagf fmaxmagl fdim 
fdimf fdiml fma fmaf fmal fadd faddf faddl fsub fsubf fsubl fmul fmulf fmull fdiv fdivf fdivl llrint llrintf llrintl Return normalized name of a llvm intrinsic name. LLVM intrinsic function names with prefix `llvm.` removed. Reference: https://llvm.org/docs/LangRef.html#intrinsic-functions va_start va_end va_copy gcroot gcread gcwrite returnaddress addressofreturnaddress sponentry frameaddress stacksave stackrestore get.dynamic.area.offset prefetch pcmarker readcyclecounter clear_cache instrprof.increment instrprof.increment.step instrprof.value.profile thread.pointer call.preallocated.setup call.preallocated.arg call.preallocated.teardown abs smax smin umax umin memcpy memcpy.inline memmove sqrt powi sin cos pow exp exp2 log log10 log2 fma fabs minnum maxnum minimum maximum copysign floor ceil trunc rint nearbyint round roundeven lround llround lrint llrint ctpop ctlz cttz fshl fshr sadd.with.overflow uadd.with.overflow ssub.with.overflow usub.with.overflow smul.with.overflow umul.with.overflow sadd.sat uadd.sat ssub.sat usub.sat sshl.sat ushl.sat smul.fix umul.fix smul.fix.sat umul.fix.sat sdiv.fix udiv.fix sdiv.fix.sat udiv.fix.sat canonicalize fmuladd set.loop.iterations test.set.loop.iterations loop.decrement.reg loop.decrement vector.reduce.add vector.reduce.fadd vector.reduce.mul vector.reduce.fmul vector.reduce.and vector.reduce.or vector.reduce.xor vector.reduce.smax vector.reduce.smin vector.reduce.umax vector.reduce.umin vector.reduce.fmax vector.reduce.fmin matrix.transpose matrix.multiply matrix.column.major.load matrix.column.major.store convert.to.fp16 convert.from.fp16 init.trampoline adjust.trampoline lifetime.start lifetime.end invariant.start invariant.end launder.invariant.group strip.invariant.group experimental.constrained.fadd experimental.constrained.fsub experimental.constrained.fmul experimental.constrained.fdiv experimental.constrained.frem experimental.constrained.fma experimental.constrained.fptoui experimental.constrained.fptosi experimental.constrained.uitofp experimental.constrained.sitofp experimental.constrained.fptrunc experimental.constrained.fpext experimental.constrained.fmuladd experimental.constrained.sqrt experimental.constrained.pow experimental.constrained.powi experimental.constrained.sin experimental.constrained.cos experimental.constrained.exp experimental.constrained.exp2 experimental.constrained.log experimental.constrained.log10 experimental.constrained.log2 experimental.constrained.rint experimental.constrained.lrint experimental.constrained.llrint experimental.constrained.nearbyint experimental.constrained.maxnum experimental.constrained.minnum experimental.constrained.maximum experimental.constrained.minimum experimental.constrained.ceil experimental.constrained.floor experimental.constrained.round experimental.constrained.roundeven experimental.constrained.lround experimental.constrained.llround experimental.constrained.trunc experimental.gc.statepoint experimental.gc.result experimental.gc.relocate experimental.gc.get.pointer.base experimental.gc.get.pointer.offset experimental.vector.reduce.add.* experimental.vector.reduce.fadd.* experimental.vector.reduce.mul.* experimental.vector.reduce.fmul.* experimental.vector.reduce.and.* experimental.vector.reduce.or.* experimental.vector.reduce.xor.* experimental.vector.reduce.smax.* experimental.vector.reduce.smin.* experimental.vector.reduce.umax.* experimental.vector.reduce.umin.* experimental.vector.reduce.fmax.* experimental.vector.reduce.fmin.* flt.rounds var.annotation ptr.annotation 
annotation codeview.annotation trap debugtrap stackprotector stackguard objectsize expect expect.with.probability assume ssa_copy type.test type.checked.load donothing experimental.deoptimize experimental.guard experimental.widenable.condition load.relative sideeffect is.constant ptrmask vscale memcpy.element.unordered.atomic memmove.element.unordered.atomic memset.element.unordered.atomic objc.autorelease objc.autoreleasePoolPop objc.autoreleasePoolPush objc.autoreleaseReturnValue objc.copyWeak objc.destroyWeak objc.initWeak objc.loadWeak objc.loadWeakRetained objc.moveWeak objc.release objc.retain objc.retainAutorelease objc.retainAutoreleaseReturnValue objc.retainAutoreleasedReturnValue objc.retainBlock objc.storeStrong objc.storeWeak preserve.array.access.index preserve.union.access.index preserve.struct.access.index masked.store.* memset NVVM intrinsic function names with prefix `llvm.` removed. Reference: https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#intrinsic-functions memcpy memmove memset sqrt fma bswap ctpop ctlz cttz fmuladd convert.to.fp16.f32 convert.from.fp16.f32 convert.to.fp16 convert.from.fp16 lifetime.start lifetime.end invariant.start invariant.end var.annotation ptr.annotation annotation expect donothing NVIDIA libdevice function names with prefix `__nv_` removed. Reference: https://docs.nvidia.com/cuda/libdevice-users-guide/function-desc.html#function-desc abs acos acosf acosh acoshf asin asinf asinh asinhf atan atan2 atan2f atanf atanh atanhf brev brevll byte_perm cbrt cbrtf ceil ceilf clz clzll copysign copysignf cos cosf cosh coshf cospi cospif dadd_rd dadd_rn dadd_ru dadd_rz ddiv_rd ddiv_rn ddiv_ru ddiv_rz dmul_rd dmul_rn dmul_ru dmul_rz double2float_rd double2float_rn double2float_ru double2float_rz double2hiint double2int_rd double2int_rn double2int_ru double2int_rz double2ll_rd double2ll_rn double2ll_ru double2ll_rz double2loint double2uint_rd double2uint_rn double2uint_ru double2uint_rz double2ull_rd double2ull_rn double2ull_ru double2ull_rz double_as_longlong drcp_rd drcp_rn drcp_ru drcp_rz dsqrt_rd dsqrt_rn dsqrt_ru dsqrt_rz erf erfc erfcf erfcinv erfcinvf erfcx erfcxf erff erfinv erfinvf exp exp10 exp10f exp2 exp2f expf expm1 expm1f fabs fabsf fadd_rd fadd_rn fadd_ru fadd_rz fast_cosf fast_exp10f fast_expf fast_fdividef fast_log10f fast_log2f fast_logf fast_powf fast_sincosf fast_sinf fast_tanf fdim fdimf fdiv_rd fdiv_rn fdiv_ru fdiv_rz ffs ffsll finitef float2half_rn float2int_rd float2int_rn float2int_ru float2int_rz float2ll_rd float2ll_rn float2ll_ru float2ll_rz float2uint_rd float2uint_rn float2uint_ru float2uint_rz float2ull_rd float2ull_rn float2ull_ru float2ull_rz float_as_int floor floorf fma fma_rd fma_rn fma_ru fma_rz fmaf fmaf_rd fmaf_rn fmaf_ru fmaf_rz fmax fmaxf fmin fminf fmod fmodf fmul_rd fmul_rn fmul_ru fmul_rz frcp_rd frcp_rn frcp_ru frcp_rz frexp frexpf frsqrt_rn fsqrt_rd fsqrt_rn fsqrt_ru fsqrt_rz fsub_rd fsub_rn fsub_ru fsub_rz hadd half2float hiloint2double hypot hypotf ilogb ilogbf int2double_rn int2float_rd int2float_rn int2float_ru int2float_rz int_as_float isfinited isinfd isinff isnand isnanf j0 j0f j1 j1f jn jnf ldexp ldexpf lgamma lgammaf ll2double_rd ll2double_rn ll2double_ru ll2double_rz ll2float_rd ll2float_rn ll2float_ru ll2float_rz llabs llmax llmin llrint llrintf llround llroundf log log10 log10f log1p log1pf log2 log2f logb logbf logf longlong_as_double max min modf modff mul24 mul64hi mulhi nan nanf nearbyint nearbyintf nextafter nextafterf normcdf normcdff normcdfinv normcdfinvf popc popcll pow powf powi powif 
rcbrt rcbrtf remainder remainderf remquo remquof rhadd rint rintf round roundf rsqrt rsqrtf sad saturatef scalbn scalbnf signbitd signbitf sin sincos sincosf sincospi sincospif sinf sinh sinhf sinpi sinpif sqrt sqrtf tan tanf tanh tanhf tgamma tgammaf trunc truncf uhadd uint2double_rn uint2float_rd uint2float_rn uint2float_ru uint2float_rz ull2double_rd ull2double_rn ull2double_ru ull2double_rz ull2float_rd ull2float_rn ull2float_ru ull2float_rz ullmax ullmin umax umin umul24 umul64hi umulhi urhadd usad y0 y0f y1 y1f yn ynf | 2.989768 | 3 |
quick_pandas.py | chenmich/google-ml-crash-course-exercises | 0 | 9483
import pandas as pd
print(pd.__version__)
city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
population = pd.Series([852469, 1015785, 485199])
#city_population_table = pd.DataFrame(({'City name': city_names, 'Population': population}))
california_houseing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
california_houseing_dataframe.describe()
california_houseing_dataframe.head()
#some error
#california_houseing_dataframe.hist('housing_median_age')
cities = pd.DataFrame({'City name': city_names, 'Population': population})
#print(type(cities['City name']))
#print(cities['City name'])
#print(type(cities['City name'][1]))
#print(cities['City name'][1])
#print(type(cities[0:2]))
#print(cities[0:2])
#print(population / 1000)
import numpy as np
np.log(population)
#print(population.apply(lambda val: val > 10000))
cities['Area square miles'] = pd.Series([46.87, 176.53, 97.92])
#print(cities)
cities['Population density'] = cities['Population'] / cities['Area square miles']
#print(cities)
print(city_names.index)
print(cities.reindex([2, 0, 1]))
print(cities)
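# --- Illustrative extension (added; not part of the original exercise file) ---
# reindex tolerates labels that are absent from the index: the matching rows
# simply come back filled with NaN instead of raising an error.
print(cities.reindex([0, 4, 5, 2]))  # rows 4 and 5 are all NaN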
src/helloworld/__main__.py | paulproteus/briefcase-toga-button-app-with-hacks | 2 | 9484
from helloworld.app import main
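# Note (added): the repository name ends in "-with-hacks"; the "True or" guard
# below looks like a deliberate hack so main() runs even when this module is
# imported rather than executed as __main__.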
if True or __name__ == '__main__':
main().main_loop()
if True or __name__ == '__main__':
main().main_loop() | none | 1 | 1.878178 | 2 |
|
backend/app/main.py | ianahart/blog | 0 | 9485
from fastapi import FastAPI
from dotenv import load_dotenv
from fastapi.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI()
load_dotenv()
app.include_router(api_router, prefix=settings.API_V1_STR)
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
if __name__ == "__main__":
# Use this for debugging purposes only
# pyright: reportGeneralTypeIssues=false
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")
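# --- Illustrative note (added; not part of the original file) ---
# settings.BACKEND_CORS_ORIGINS is expected to be an iterable of allowed
# origins. A typical development value (assumed here, not taken from this
# repository's config) would look like:
#   BACKEND_CORS_ORIGINS=["http://localhost:3000", "https://myblog.example"]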
| from fastapi import FastAPI
from dotenv import load_dotenv
from fastapi.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI()
load_dotenv()
app.include_router(api_router, prefix=settings.API_V1_STR)
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
if __name__ == "__main__":
# Use this for debugging purposes only
# pyright: reportGeneralTypeIssues=false
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug") | en | 0.545152 | # Set all CORS enabled origins # Use this for debugging purposes only # pyright: reportGeneralTypeIssues=false | 1.98019 | 2 |
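The app.core.config module is not included in this sample; a minimal sketch of the settings it would need, with field names inferred from the usage above (assuming pydantic v1's BaseSettings):
# Hypothetical app/core/config.py -- names inferred from the usage above.
from typing import List
from pydantic import BaseSettings

class Settings(BaseSettings):
    API_V1_STR: str = "/api/v1"
    # Origins allowed through the CORS middleware; an empty list skips that block.
    BACKEND_CORS_ORIGINS: List[str] = ["http://localhost:3000"]

settings = Settings()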
test_data/samples/alembic_template_output.py | goldstar611/ssort | 238 | 9486 | """Example revision
Revision ID: fdf0cf6487a3
Revises:
Create Date: 2021-08-09 17:55:19.491713
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"example",
sa.Column("example_id", sa.Integer(), nullable=False),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("example")
# ### end Alembic commands ###
| """Example revision
Revision ID: fdf0cf6487a3
Revises:
Create Date: 2021-08-09 17:55:19.491713
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"example",
sa.Column("example_id", sa.Integer(), nullable=False),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("example")
# ### end Alembic commands ###
| en | 0.501392 | Example revision Revision ID: fdf0cf6487a3 Revises: Create Date: 2021-08-09 17:55:19.491713 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.539363 | 2 |
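A minimal sketch of driving such a revision with the Alembic API (the config file path is an assumption, not part of the sample):
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")   # assumed location of the project's Alembic config
command.upgrade(cfg, "head")  # runs upgrade() above and creates the table
command.downgrade(cfg, "-1")  # runs downgrade() above and removes it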
.archived/snakecode/0173.py | gearbird/calgo | 4 | 9487 | <filename>.archived/snakecode/0173.py<gh_stars>1-10
from __future__ import annotations
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val: int = 0, left: Optional[TreeNode] = None, right: Optional[TreeNode] = None):
self.val = val
self.left = left
self.right = right
class BSTIterator:
def __init__(self, root: Optional[TreeNode]):
self.stack: list[TreeNode] = []
self.cur = root
def next(self) -> int:
if not self.hasNext():
raise StopIteration()
self.cur = self.stack[-1].right
return self.stack.pop().val
def hasNext(self) -> bool:
while self.cur:
self.stack.append(self.cur)
self.cur = self.cur.left
if self.stack: return True
return False
| <filename>.archived/snakecode/0173.py<gh_stars>1-10
from __future__ import annotations
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val: int = 0, left: Optional[TreeNode] = None, right: Optional[TreeNode] = None):
self.val = val
self.left = left
self.right = right
class BSTIterator:
def __init__(self, root: Optional[TreeNode]):
self.stack: list[TreeNode] = []
self.cur = root
def next(self) -> int:
if not self.hasNext():
raise StopIteration()
self.cur = self.stack[-1].right
return self.stack.pop().val
def hasNext(self) -> bool:
while self.cur:
self.stack.append(self.cur)
self.cur = self.cur.left
if self.stack: return True
return False
| en | 0.652542 | # Definition for a binary tree node. | 3.370604 | 3 |
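A small usage sketch for the iterator above (tree values chosen arbitrarily):
# Build the BST 7 -> (3, 15 -> (9, 20)) and walk it in sorted order.
root = TreeNode(7, TreeNode(3), TreeNode(15, TreeNode(9), TreeNode(20)))
it = BSTIterator(root)
values = []
while it.hasNext():
    values.append(it.next())
print(values)  # [3, 7, 9, 15, 20]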
.leetcode/506.relative-ranks.py | KuiyuanFu/PythonLeetCode | 0 | 9488 | <filename>.leetcode/506.relative-ranks.py
# @lc app=leetcode id=506 lang=python3
#
# [506] Relative Ranks
#
# https://leetcode.com/problems/relative-ranks/description/
#
# algorithms
# Easy (53.46%)
# Likes: 188
# Dislikes: 9
# Total Accepted: 71.1K
# Total Submissions: 132.4K
# Testcase Example: '[5,4,3,2,1]'
#
# You are given an integer array score of size n, where score[i] is the score
# of the i^th athlete in a competition. All the scores are guaranteed to be
# unique.
#
# The athletes are placed based on their scores, where the 1^st place athlete
# has the highest score, the 2^nd place athlete has the 2^nd highest score, and
# so on. The placement of each athlete determines their rank:
#
#
# The 1^st place athlete's rank is "Gold Medal".
# The 2^nd place athlete's rank is "Silver Medal".
# The 3^rd place athlete's rank is "Bronze Medal".
# For the 4^th place to the n^th place athlete, their rank is their placement
# number (i.e., the x^th place athlete's rank is "x").
#
#
# Return an array answer of size n where answer[i] is the rank of the i^th
# athlete.
#
#
# Example 1:
#
#
# Input: score = [5,4,3,2,1]
# Output: ["Gold Medal","Silver Medal","Bronze Medal","4","5"]
# Explanation: The placements are [1^st, 2^nd, 3^rd, 4^th, 5^th].
#
# Example 2:
#
#
# Input: score = [10,3,8,9,4]
# Output: ["Gold Medal","5","Bronze Medal","Silver Medal","4"]
# Explanation: The placements are [1^st, 5^th, 3^rd, 2^nd, 4^th].
#
#
#
#
# Constraints:
#
#
# n == score.length
# 1 <= n <= 10^4
# 0 <= score[i] <= 10^6
# All the values in score are unique.
#
#
#
# @lc tags=Unknown
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Sort by score, descending.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def findRelativeRanks(self, score: List[int]) -> List[str]:
s = [(-s, i) for i, s in enumerate(score)]
s.sort()
ss = ['Gold Medal', 'Silver Medal', 'Bronze Medal']
def toS(idx):
if idx >= 3:
return str(idx + 1)
return ss[idx]
res = [''] * len(score)
for idx, (_, i) in enumerate(s):
res[i] = toS(idx)
return res
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('score = [5,4,3,2,1]')
print('Exception :')
print('["Gold Medal","Silver Medal","Bronze Medal","4","5"]')
print('Output :')
print(str(Solution().findRelativeRanks([5, 4, 3, 2, 1])))
print()
print('Example 2:')
print('Input : ')
print('score = [10,3,8,9,4]')
print('Exception :')
print('["Gold Medal","5","Bronze Medal","Silver Medal","4"]')
print('Output :')
print(str(Solution().findRelativeRanks([10, 3, 8, 9, 4])))
print()
pass
# @lc main=end | <filename>.leetcode/506.relative-ranks.py
# @lc app=leetcode id=506 lang=python3
#
# [506] Relative Ranks
#
# https://leetcode.com/problems/relative-ranks/description/
#
# algorithms
# Easy (53.46%)
# Likes: 188
# Dislikes: 9
# Total Accepted: 71.1K
# Total Submissions: 132.4K
# Testcase Example: '[5,4,3,2,1]'
#
# You are given an integer array score of size n, where score[i] is the score
# of the i^th athlete in a competition. All the scores are guaranteed to be
# unique.
#
# The athletes are placed based on their scores, where the 1^st place athlete
# has the highest score, the 2^nd place athlete has the 2^nd highest score, and
# so on. The placement of each athlete determines their rank:
#
#
# The 1^st place athlete's rank is "Gold Medal".
# The 2^nd place athlete's rank is "Silver Medal".
# The 3^rd place athlete's rank is "Bronze Medal".
# For the 4^th place to the n^th place athlete, their rank is their placement
# number (i.e., the x^th place athlete's rank is "x").
#
#
# Return an array answer of size n where answer[i] is the rank of the i^th
# athlete.
#
#
# Example 1:
#
#
# Input: score = [5,4,3,2,1]
# Output: ["Gold Medal","Silver Medal","Bronze Medal","4","5"]
# Explanation: The placements are [1^st, 2^nd, 3^rd, 4^th, 5^th].
#
# Example 2:
#
#
# Input: score = [10,3,8,9,4]
# Output: ["Gold Medal","5","Bronze Medal","Silver Medal","4"]
# Explanation: The placements are [1^st, 5^th, 3^rd, 2^nd, 4^th].
#
#
#
#
# Constraints:
#
#
# n == score.length
# 1 <= n <= 10^4
# 0 <= score[i] <= 10^6
# All the values in score are unique.
#
#
#
# @lc tags=Unknown
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Sort by score, descending.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def findRelativeRanks(self, score: List[int]) -> List[str]:
s = [(-s, i) for i, s in enumerate(score)]
s.sort()
ss = ['Gold Medal', 'Silver Medal', 'Bronze Medal']
def toS(idx):
if idx >= 3:
return str(idx + 1)
return ss[idx]
res = [''] * len(score)
for idx, (_, i) in enumerate(s):
res[i] = toS(idx)
return res
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('score = [5,4,3,2,1]')
print('Exception :')
print('["Gold Medal","Silver Medal","Bronze Medal","4","5"]')
print('Output :')
print(str(Solution().findRelativeRanks([5, 4, 3, 2, 1])))
print()
print('Example 2:')
print('Input : ')
print('score = [10,3,8,9,4]')
print('Exception :')
print('["Gold Medal","5","Bronze Medal","Silver Medal","4"]')
print('Output :')
print(str(Solution().findRelativeRanks([10, 3, 8, 9, 4])))
print()
pass
# @lc main=end | en | 0.869652 | # @lc app=leetcode id=506 lang=python3 # # [506] Relative Ranks # # https://leetcode.com/problems/relative-ranks/description/ # # algorithms # Easy (53.46%) # Likes: 188 # Dislikes: 9 # Total Accepted: 71.1K # Total Submissions: 132.4K # Testcase Example: '[5,4,3,2,1]' # # You are given an integer array score of size n, where score[i] is the score # of the i^th athlete in a competition. All the scores are guaranteed to be # unique. # # The athletes are placed based on their scores, where the 1^st place athlete # has the highest score, the 2^nd place athlete has the 2^nd highest score, and # so on. The placement of each athlete determines their rank: # # # The 1^st place athlete's rank is "Gold Medal". # The 2^nd place athlete's rank is "Silver Medal". # The 3^rd place athlete's rank is "Bronze Medal". # For the 4^th place to the n^th place athlete, their rank is their placement # number (i.e., the x^th place athlete's rank is "x"). # # # Return an array answer of size n where answer[i] is the rank of the i^th # athlete. # # # Example 1: # # # Input: score = [5,4,3,2,1] # Output: ["Gold Medal","Silver Medal","Bronze Medal","4","5"] # Explanation: The placements are [1^st, 2^nd, 3^rd, 4^th, 5^th]. # # Example 2: # # # Input: score = [10,3,8,9,4] # Output: ["Gold Medal","5","Bronze Medal","Silver Medal","4"] # Explanation: The placements are [1^st, 5^th, 3^rd, 2^nd, 4^th]. # # # # # Constraints: # # # n == score.length # 1 <= n <= 10^4 # 0 <= score[i] <= 10^6 # All the values in score are unique. # # # # @lc tags=Unknown # @lc imports=start # @lc imports=end # @lc idea=start # # 排序。 # # @lc idea=end # @lc group= # @lc rank= # @lc code=start # @lc code=end # @lc main=start # @lc main=end | 3.610702 | 4 |
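For comparison, the same placement logic can be written without the helper by building the order of indices directly (a sketch, not part of the submission):
def find_relative_ranks(score):
    names = ['Gold Medal', 'Silver Medal', 'Bronze Medal']
    order = sorted(range(len(score)), key=lambda i: -score[i])
    res = [''] * len(score)
    for place, i in enumerate(order):
        res[i] = names[place] if place < 3 else str(place + 1)
    return res

print(find_relative_ranks([10, 3, 8, 9, 4]))  # ['Gold Medal', '5', 'Bronze Medal', 'Silver Medal', '4']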
test/msan/lit.cfg.py | QuarkTheAwesome/compiler-rt-be-aeabi | 118 | 9489 | # -*- Python -*-
import os
# Setup config name.
config.name = 'MemorySanitizer' + getattr(config, 'name_suffix', 'default')
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
# Setup default compiler flags used with -fsanitize=memory option.
clang_msan_cflags = (["-fsanitize=memory",
"-mno-omit-leaf-frame-pointer",
"-fno-omit-frame-pointer",
"-fno-optimize-sibling-calls"] +
[config.target_cflags] +
config.debug_info_flags)
# Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD.
if config.host_os == 'FreeBSD':
clang_msan_cflags += ["-lexecinfo", "-fPIC"]
clang_msan_cxxflags = config.cxx_mode_flags + clang_msan_cflags
# Flags for KMSAN invocation. This is C-only, we're not interested in C++.
clang_kmsan_cflags = (["-fsanitize=kernel-memory"] +
[config.target_cflags] +
config.debug_info_flags)
def build_invocation(compile_flags):
return " " + " ".join([config.clang] + compile_flags) + " "
config.substitutions.append( ("%clang_msan ", build_invocation(clang_msan_cflags)) )
config.substitutions.append( ("%clangxx_msan ", build_invocation(clang_msan_cxxflags)) )
config.substitutions.append( ("%clang_kmsan ", build_invocation(clang_kmsan_cflags)) )
# Default test suffixes.
config.suffixes = ['.c', '.cc', '.cpp']
if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']:
config.unsupported = True
# For mips64, mips64el we have forced store_context_size to 1 because these
# archs use slow unwinder which is not async signal safe. Therefore we only
# check the first frame since store_context size is 1.
if config.host_arch in ['mips64', 'mips64el']:
config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
else:
config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
| # -*- Python -*-
import os
# Setup config name.
config.name = 'MemorySanitizer' + getattr(config, 'name_suffix', 'default')
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
# Setup default compiler flags used with -fsanitize=memory option.
clang_msan_cflags = (["-fsanitize=memory",
"-mno-omit-leaf-frame-pointer",
"-fno-omit-frame-pointer",
"-fno-optimize-sibling-calls"] +
[config.target_cflags] +
config.debug_info_flags)
# Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD.
if config.host_os == 'FreeBSD':
clang_msan_cflags += ["-lexecinfo", "-fPIC"]
clang_msan_cxxflags = config.cxx_mode_flags + clang_msan_cflags
# Flags for KMSAN invocation. This is C-only, we're not interested in C++.
clang_kmsan_cflags = (["-fsanitize=kernel-memory"] +
[config.target_cflags] +
config.debug_info_flags)
def build_invocation(compile_flags):
return " " + " ".join([config.clang] + compile_flags) + " "
config.substitutions.append( ("%clang_msan ", build_invocation(clang_msan_cflags)) )
config.substitutions.append( ("%clangxx_msan ", build_invocation(clang_msan_cxxflags)) )
config.substitutions.append( ("%clang_kmsan ", build_invocation(clang_kmsan_cflags)) )
# Default test suffixes.
config.suffixes = ['.c', '.cc', '.cpp']
if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']:
config.unsupported = True
# For mips64, mips64el we have forced store_context_size to 1 because these
# archs use slow unwinder which is not async signal safe. Therefore we only
# check the first frame since store_context size is 1.
if config.host_arch in ['mips64', 'mips64el']:
config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
else:
config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
| en | 0.775116 | # -*- Python -*- # Setup config name. # Setup source root. # Setup default compiler flags used with -fsanitize=memory option. # Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD. # Flags for KMSAN invocation. This is C-only, we're not interested in C++. # Default test suffixes. # For mips64, mips64el we have forced store_context_size to 1 because these # archs use slow unwinder which is not async signal safe. Therefore we only # check the first frame since store_context size is 1. | 1.954833 | 2 |
application/core/migrations/0001_initial.py | victor-freitas/ProjetoNCS | 0 | 9490 | <reponame>victor-freitas/ProjetoNCS
# Generated by Django 2.0.6 on 2018-06-17 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('cpf_cnpj', models.IntegerField(db_column='CPF_CNPJ', unique=True)),
('razao', models.CharField(blank=True, db_column='RAZAO', max_length=100, null=True)),
('endereco', models.CharField(db_column='ENDERECO', max_length=80)),
('cep', models.CharField(db_column='CEP', max_length=20)),
('email', models.CharField(db_column='EMAIL', max_length=200)),
('telefone', models.CharField(db_column='TELEFONE', max_length=11)),
('celular', models.CharField(blank=True, db_column='CELULAR', max_length=11, null=True)),
],
options={
'db_table': 'Cliente',
'managed': False,
},
),
migrations.CreateModel(
name='Fornecedor',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('cpf_cnpj', models.IntegerField(db_column='CPF_CNPJ', unique=True)),
('razao', models.CharField(blank=True, db_column='RAZAO', max_length=100, null=True)),
('endereco', models.CharField(db_column='ENDERECO', max_length=80)),
('cep', models.CharField(db_column='CEP', max_length=20)),
('email', models.CharField(db_column='EMAIL', max_length=200)),
('telefone', models.CharField(db_column='TELEFONE', max_length=11)),
('celular', models.CharField(blank=True, db_column='CELULAR', max_length=11, null=True)),
('pessoa_contato', models.CharField(blank=True, db_column='PESSOA_CONTATO', max_length=100, null=True)),
],
options={
'db_table': 'Fornecedor',
'managed': False,
},
),
migrations.CreateModel(
name='Funcionario',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
('cpf', models.IntegerField(db_column='CPF')),
('cargo', models.SmallIntegerField(db_column='CARGO')),
('login', models.CharField(db_column='LOGIN', max_length=100)),
('senha', models.CharField(db_column='SENHA', max_length=50)),
],
options={
'db_table': 'Funcionario',
'managed': False,
},
),
migrations.CreateModel(
name='Materiaprima',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=60)),
('forma_emb', models.CharField(db_column='FORMA_EMB', max_length=60)),
('peso', models.CharField(db_column='PESO', max_length=20)),
('unid_medida', models.CharField(db_column='UNID_MEDIDA', max_length=50)),
('quantidade', models.IntegerField(db_column='QUANTIDADE')),
('quantidade_min', models.IntegerField(db_column='QUANTIDADE_MIN')),
('descricao', models.CharField(db_column='DESCRICAO', max_length=500)),
('data_recebimento', models.DateField(db_column='DATA_RECEBIMENTO')),
],
options={
'db_table': 'MateriaPrima',
'managed': False,
},
),
migrations.CreateModel(
name='Ordemdeproducao',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('descricao', models.CharField(db_column='Descricao', max_length=500)),
],
options={
'db_table': 'OrdemDeProducao',
'managed': False,
},
),
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('data_pedido', models.DateField(db_column='DATA_PEDIDO')),
('valor', models.CharField(blank=True, db_column='VALOR', max_length=20, null=True)),
],
options={
'db_table': 'Pedido',
'managed': False,
},
),
migrations.CreateModel(
name='Pedidomp',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('data_pedido', models.DateField(db_column='DATA_PEDIDO')),
('data_prevista', models.DateField(db_column='DATA_PREVISTA')),
('descricao', models.CharField(blank=True, db_column='DESCRICAO', max_length=500, null=True)),
('valor', models.CharField(blank=True, db_column='VALOR', max_length=20, null=True)),
],
options={
'db_table': 'PedidoMP',
'managed': False,
},
),
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=60)),
('forma_emb', models.CharField(db_column='FORMA_EMB', max_length=60)),
('peso', models.CharField(db_column='PESO', max_length=20)),
('unid_medida', models.CharField(db_column='UNID_MEDIDA', max_length=50)),
('preco', models.CharField(blank=True, db_column='PRECO', max_length=10, null=True)),
('quantidade', models.IntegerField(blank=True, db_column='QUANTIDADE', null=True)),
('desc_produto', models.CharField(db_column='DESC_PRODUTO', max_length=500)),
],
options={
'db_table': 'Produto',
'managed': False,
},
),
migrations.CreateModel(
name='Setor',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'Setor',
'managed': False,
},
),
migrations.CreateModel(
name='Statusordemproducao',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('status_nome', models.CharField(db_column='STATUS_NOME', max_length=30)),
],
options={
'db_table': 'StatusOrdemProducao',
'managed': False,
},
),
migrations.CreateModel(
name='Tipoproduto',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'TipoProduto',
'managed': False,
},
),
migrations.CreateModel(
name='Tiposeguimento',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'TipoSeguimento',
'managed': False,
},
),
]
| # Generated by Django 2.0.6 on 2018-06-17 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('cpf_cnpj', models.IntegerField(db_column='CPF_CNPJ', unique=True)),
('razao', models.CharField(blank=True, db_column='RAZAO', max_length=100, null=True)),
('endereco', models.CharField(db_column='ENDERECO', max_length=80)),
('cep', models.CharField(db_column='CEP', max_length=20)),
('email', models.CharField(db_column='EMAIL', max_length=200)),
('telefone', models.CharField(db_column='TELEFONE', max_length=11)),
('celular', models.CharField(blank=True, db_column='CELULAR', max_length=11, null=True)),
],
options={
'db_table': 'Cliente',
'managed': False,
},
),
migrations.CreateModel(
name='Fornecedor',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('cpf_cnpj', models.IntegerField(db_column='CPF_CNPJ', unique=True)),
('razao', models.CharField(blank=True, db_column='RAZAO', max_length=100, null=True)),
('endereco', models.CharField(db_column='ENDERECO', max_length=80)),
('cep', models.CharField(db_column='CEP', max_length=20)),
('email', models.CharField(db_column='EMAIL', max_length=200)),
('telefone', models.CharField(db_column='TELEFONE', max_length=11)),
('celular', models.CharField(blank=True, db_column='CELULAR', max_length=11, null=True)),
('pessoa_contato', models.CharField(blank=True, db_column='PESSOA_CONTATO', max_length=100, null=True)),
],
options={
'db_table': 'Fornecedor',
'managed': False,
},
),
migrations.CreateModel(
name='Funcionario',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
('cpf', models.IntegerField(db_column='CPF')),
('cargo', models.SmallIntegerField(db_column='CARGO')),
('login', models.CharField(db_column='LOGIN', max_length=100)),
('senha', models.CharField(db_column='SENHA', max_length=50)),
],
options={
'db_table': 'Funcionario',
'managed': False,
},
),
migrations.CreateModel(
name='Materiaprima',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=60)),
('forma_emb', models.CharField(db_column='FORMA_EMB', max_length=60)),
('peso', models.CharField(db_column='PESO', max_length=20)),
('unid_medida', models.CharField(db_column='UNID_MEDIDA', max_length=50)),
('quantidade', models.IntegerField(db_column='QUANTIDADE')),
('quantidade_min', models.IntegerField(db_column='QUANTIDADE_MIN')),
('descricao', models.CharField(db_column='DESCRICAO', max_length=500)),
('data_recebimento', models.DateField(db_column='DATA_RECEBIMENTO')),
],
options={
'db_table': 'MateriaPrima',
'managed': False,
},
),
migrations.CreateModel(
name='Ordemdeproducao',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('descricao', models.CharField(db_column='Descricao', max_length=500)),
],
options={
'db_table': 'OrdemDeProducao',
'managed': False,
},
),
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('data_pedido', models.DateField(db_column='DATA_PEDIDO')),
('valor', models.CharField(blank=True, db_column='VALOR', max_length=20, null=True)),
],
options={
'db_table': 'Pedido',
'managed': False,
},
),
migrations.CreateModel(
name='Pedidomp',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('data_pedido', models.DateField(db_column='DATA_PEDIDO')),
('data_prevista', models.DateField(db_column='DATA_PREVISTA')),
('descricao', models.CharField(blank=True, db_column='DESCRICAO', max_length=500, null=True)),
('valor', models.CharField(blank=True, db_column='VALOR', max_length=20, null=True)),
],
options={
'db_table': 'PedidoMP',
'managed': False,
},
),
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=60)),
('forma_emb', models.CharField(db_column='FORMA_EMB', max_length=60)),
('peso', models.CharField(db_column='PESO', max_length=20)),
('unid_medida', models.CharField(db_column='UNID_MEDIDA', max_length=50)),
('preco', models.CharField(blank=True, db_column='PRECO', max_length=10, null=True)),
('quantidade', models.IntegerField(blank=True, db_column='QUANTIDADE', null=True)),
('desc_produto', models.CharField(db_column='DESC_PRODUTO', max_length=500)),
],
options={
'db_table': 'Produto',
'managed': False,
},
),
migrations.CreateModel(
name='Setor',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'Setor',
'managed': False,
},
),
migrations.CreateModel(
name='Statusordemproducao',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('status_nome', models.CharField(db_column='STATUS_NOME', max_length=30)),
],
options={
'db_table': 'StatusOrdemProducao',
'managed': False,
},
),
migrations.CreateModel(
name='Tipoproduto',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'TipoProduto',
'managed': False,
},
),
migrations.CreateModel(
name='Tiposeguimento',
fields=[
('id', models.SmallIntegerField(db_column='ID', primary_key=True, serialize=False)),
('nome', models.CharField(db_column='NOME', max_length=100)),
],
options={
'db_table': 'TipoSeguimento',
'managed': False,
},
),
] | en | 0.794675 | # Generated by Django 2.0.6 on 2018-06-17 04:47 | 1.760594 | 2 |
dataschema/entity.py | vingkan/sql_tools | 1 | 9491 | #
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilityes for checking and."""
import dataclasses
import datetime
import decimal
from types import ModuleType
from typing import NewType, Union
# In your data declaration python modules define a JAVA_PACKAGE
# variable at top level to specify the corresponding Java package of generated
# classes.
JAVA_PACKAGE = 'JAVA_PACKAGE'
def GetJavaPackage(module: ModuleType) -> str:
if hasattr(module, JAVA_PACKAGE):
return getattr(module, JAVA_PACKAGE)
else:
return module.__name__
_SCHEMA_ANNOTATIONS = '__schema_annotations__'
_EXPECTED_DICT_KEYS = set([
'__module__', '__annotations__', '__doc__', '__dict__', '__weakref__',
'__dataclass_params__', '__dataclass_fields__', _SCHEMA_ANNOTATIONS
])
_EXPECTED_FUNCTIONS = ['__init__', '__repr__', '__eq__', '__hash__']
_BASE_TYPES = set([
int, bytes, str, float, bool, datetime.date, datetime.datetime,
decimal.Decimal
])
_SCHEMA_ANNOTATIONS = '__schema_annotations__'
_CLASS_ID = 0
def _Annotate(cls=None, annotation=None):
"""Annotates a class or a type. `annotation` should from annotation.py"""
def Wrap(cls):
schema_annotations = []
if hasattr(cls, _SCHEMA_ANNOTATIONS):
schema_annotations.extend(getattr(cls, _SCHEMA_ANNOTATIONS))
if isinstance(annotation, list):
schema_annotations.extend(annotation)
else:
schema_annotations.append(annotation)
global _CLASS_ID
_CLASS_ID += 1
supertype = cls
if hasattr(cls, '__supertype__'):
supertype = cls.__supertype__
annotated_type = NewType(f'Annotated_{_CLASS_ID}', supertype)
setattr(annotated_type, _SCHEMA_ANNOTATIONS, schema_annotations)
return annotated_type
if cls is None:
return Wrap
return Wrap(cls)
def Annotate(cls, annotation):
"""Annotates a field type with the provided annotation."""
return _Annotate(cls, annotation=annotation)
def IsAnnotatedType(field_cls: type):
"""If provided field_cls is an annotated type."""
return hasattr(field_cls, _SCHEMA_ANNOTATIONS)
def GetAnnotatedType(field_cls: type):
"""Returns the original type behind the annotation (if any)."""
if IsAnnotatedType(field_cls) and hasattr(field_cls, '__supertype__'):
return field_cls.__supertype__
return field_cls
def IsOptionalType(field_cls: type):
"""If the field_cls looks like an Optional[...] type."""
return (hasattr(field_cls, '__origin__')
# pylint: disable=comparison-with-callable
and field_cls.__origin__ == Union and len(field_cls.__args__) == 2
and field_cls.__args__[1] == type(None))
def GetOptionalType(field_cls: type):
"""Returns the type of optional & annotation or None if not optional."""
field_cls = GetAnnotatedType(field_cls)
if IsOptionalType(field_cls):
return field_cls.__args__[0]
return None
def GetOriginalType(field_cls: type):
"""Returns the type of field_cls, behind annotations and Optional."""
field_cls = GetAnnotatedType(field_cls)
if IsOptionalType(field_cls):
return field_cls.__args__[0]
return field_cls
def GetStructuredTypeName(field_cls: type):
"""Returns the structure type name for a type, behind annotation."""
field_cls = GetAnnotatedType(field_cls)
if not hasattr(field_cls, '__origin__'):
return None
if field_cls.__origin__ is dict:
return 'dict'
elif field_cls.__origin__ is list:
return 'list'
elif field_cls.__origin__ is set:
return 'set'
return None
def IsBasicType(field_cls: type):
"""If the type field_cls looks like one of the basic field types."""
if GetAnnotatedType(field_cls) in _BASE_TYPES:
        return True
    return False
_MAX_DEPTH = 30
class FieldTypeChecker:
"""Checks the type of a fields in a dataclass."""
def __init__(self, field_name, field_cls):
self.field_name = field_name
self.field_cls = field_cls
self.checked = set()
def _check(self, field_cls, depth):
"""Check if the type of a field is acceptable."""
if field_cls in self.checked:
return True
if depth > _MAX_DEPTH:
raise ValueError(f'Recursive field type found at {field_cls} '
f'for field `{self.field_name}`')
field_cls = GetAnnotatedType(field_cls)
if IsBasicType(field_cls):
return True
if hasattr(field_cls, '__origin__'):
if field_cls.__origin__ is dict:
self._check(field_cls.__args__[0], depth)
self._check(field_cls.__args__[1], depth)
elif field_cls.__origin__ is list:
self._check(field_cls.__args__[0], depth)
elif field_cls.__origin__ is set:
self._check(field_cls.__args__[0], depth)
elif ( # pylint: disable=comparison-with-callable
field_cls.__origin__ == Union and
len(field_cls.__args__) == 2 and
field_cls.__args__[1] == type(None)):
if GetStructuredTypeName(field_cls) is not None:
raise ValueError('Cannot have Optional structured fields.'
'(e.g. Optional[List or Set or Dict])')
# Optional[...]
self._check(field_cls.__args__[0], depth)
else:
raise ValueError(f'Invalid origin class for {field_cls}: '
f'`{field_cls.__origin__}`')
else:
checker = DataclassChecker(field_cls)
if checker.check_is_dataclass() is not None:
raise ValueError(
f'Invalid type surfaced for field `{self.field_name}`: '
f'`{self.field_cls}` - {field_cls} is not acceptable')
err = checker.check()
if err:
errors = '; '.join(err)
raise ValueError(
f'Subfield entity class of field `{self.field_name}` '
f'({field_cls}) has type errors: {errors}')
self.checked.add(field_cls)
return True
def check(self):
return self._check(self.field_cls, 0)
class DataclassChecker:
"""Checks if a python type and its structure conforms to Dataclass specs."""
def __init__(self, cls: type):
self.cls = cls
self.nested = []
def _err_class(self):
return f'dataclass class `{self.cls}` in module `{self.cls.__module__}`'
def _err_field(self, field: str):
return (f'field `{field}` of dataclass class `{self.cls.__name__}` '
f'in module `{self.cls.__module__}`')
def check_is_dataclass(self):
if not dataclasses.is_dataclass(self.cls):
return f'{self._err_class()} is not a dataclass'
return None
def _check_type(self, field_name, field_cls):
try:
FieldTypeChecker(field_name, field_cls).check()
return None
except ValueError as e:
return f'{e.args[0]} for {self._err_field(field_name)}'
def _check_field_type(self, field_name, field_cls):
return self._check_type(GetOriginalType(field_name), field_cls)
def _check_dataclass_members(self):
err = []
for key in self.cls.__dict__:
# pylint: disable=comparison-with-callable,unidiomatic-typecheck
if type(self.cls.__dict__[key]) == type:
self.nested.append(
(key, DataclassChecker(self.cls.__dict__[key])))
elif callable(
self.cls.__dict__[key]) and key not in _EXPECTED_FUNCTIONS:
err.append(f'{self._err_class()} has unexpected function '
f'member `{key}`')
elif (key not in _EXPECTED_DICT_KEYS and
key not in _EXPECTED_FUNCTIONS and
key not in self.cls.__annotations__):
err.append(f'{self._err_class()} has unexpected / non annotated'
f' member `{key}`: {self.cls.__dict__[key]}')
for field in dataclasses.fields(self.cls):
field_err = self._check_field_type(field.name, field.type)
if field_err is not None:
err.append(field_err)
for nested in self.nested:
for nested_err in nested[1].check():
err.append(f'{nested_err}; for nested sub-class '
f'{nested[0]} of {self._err_class()}')
return err
def check(self):
err_dataclass = self.check_is_dataclass()
if err_dataclass is not None:
return [err_dataclass]
return self._check_dataclass_members()
def SchemaAnnotations(cls: type):
"""Returns the schema annotations of a type."""
annotations = []
if hasattr(cls, _SCHEMA_ANNOTATIONS):
annotations.extend(cls.__schema_annotations__)
return annotations
| #
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilityes for checking and."""
import dataclasses
import datetime
import decimal
from types import ModuleType
from typing import NewType, Union
# In your data declaration python modules define a JAVA_PACKAGE
# variable at top level to specify the corresponding Java package of generated
# classes.
JAVA_PACKAGE = 'JAVA_PACKAGE'
def GetJavaPackage(module: ModuleType) -> str:
if hasattr(module, JAVA_PACKAGE):
return getattr(module, JAVA_PACKAGE)
else:
return module.__name__
_SCHEMA_ANNOTATIONS = '__schema_annotations__'
_EXPECTED_DICT_KEYS = set([
'__module__', '__annotations__', '__doc__', '__dict__', '__weakref__',
'__dataclass_params__', '__dataclass_fields__', _SCHEMA_ANNOTATIONS
])
_EXPECTED_FUNCTIONS = ['__init__', '__repr__', '__eq__', '__hash__']
_BASE_TYPES = set([
int, bytes, str, float, bool, datetime.date, datetime.datetime,
decimal.Decimal
])
_SCHEMA_ANNOTATIONS = '__schema_annotations__'
_CLASS_ID = 0
def _Annotate(cls=None, annotation=None):
"""Annotates a class or a type. `annotation` should from annotation.py"""
def Wrap(cls):
schema_annotations = []
if hasattr(cls, _SCHEMA_ANNOTATIONS):
schema_annotations.extend(getattr(cls, _SCHEMA_ANNOTATIONS))
if isinstance(annotation, list):
schema_annotations.extend(annotation)
else:
schema_annotations.append(annotation)
global _CLASS_ID
_CLASS_ID += 1
supertype = cls
if hasattr(cls, '__supertype__'):
supertype = cls.__supertype__
annotated_type = NewType(f'Annotated_{_CLASS_ID}', supertype)
setattr(annotated_type, _SCHEMA_ANNOTATIONS, schema_annotations)
return annotated_type
if cls is None:
return Wrap
return Wrap(cls)
def Annotate(cls, annotation):
"""Annotates a field type with the provided annotation."""
return _Annotate(cls, annotation=annotation)
def IsAnnotatedType(field_cls: type):
"""If provided field_cls is an annotated type."""
return hasattr(field_cls, _SCHEMA_ANNOTATIONS)
def GetAnnotatedType(field_cls: type):
"""Returns the original type behind the annotation (if any)."""
if IsAnnotatedType(field_cls) and hasattr(field_cls, '__supertype__'):
return field_cls.__supertype__
return field_cls
def IsOptionalType(field_cls: type):
"""If the field_cls looks like an Optional[...] type."""
return (hasattr(field_cls, '__origin__')
# pylint: disable=comparison-with-callable
and field_cls.__origin__ == Union and len(field_cls.__args__) == 2
and field_cls.__args__[1] == type(None))
def GetOptionalType(field_cls: type):
"""Returns the type of optional & annotation or None if not optional."""
field_cls = GetAnnotatedType(field_cls)
if IsOptionalType(field_cls):
return field_cls.__args__[0]
return None
def GetOriginalType(field_cls: type):
"""Returns the type of field_cls, behind annotations and Optional."""
field_cls = GetAnnotatedType(field_cls)
if IsOptionalType(field_cls):
return field_cls.__args__[0]
return field_cls
def GetStructuredTypeName(field_cls: type):
"""Returns the structure type name for a type, behind annotation."""
field_cls = GetAnnotatedType(field_cls)
if not hasattr(field_cls, '__origin__'):
return None
if field_cls.__origin__ is dict:
return 'dict'
elif field_cls.__origin__ is list:
return 'list'
elif field_cls.__origin__ is set:
return 'set'
return None
def IsBasicType(field_cls: type):
"""If the type field_cls looks like one of the basic field types."""
if GetAnnotatedType(field_cls) in _BASE_TYPES:
        return True
    return False
_MAX_DEPTH = 30
class FieldTypeChecker:
"""Checks the type of a fields in a dataclass."""
def __init__(self, field_name, field_cls):
self.field_name = field_name
self.field_cls = field_cls
self.checked = set()
def _check(self, field_cls, depth):
"""Check if the type of a field is acceptable."""
if field_cls in self.checked:
return True
if depth > _MAX_DEPTH:
raise ValueError(f'Recursive field type found at {field_cls} '
f'for field `{self.field_name}`')
field_cls = GetAnnotatedType(field_cls)
if IsBasicType(field_cls):
return True
if hasattr(field_cls, '__origin__'):
if field_cls.__origin__ is dict:
self._check(field_cls.__args__[0], depth)
self._check(field_cls.__args__[1], depth)
elif field_cls.__origin__ is list:
self._check(field_cls.__args__[0], depth)
elif field_cls.__origin__ is set:
self._check(field_cls.__args__[0], depth)
elif ( # pylint: disable=comparison-with-callable
field_cls.__origin__ == Union and
len(field_cls.__args__) == 2 and
field_cls.__args__[1] == type(None)):
if GetStructuredTypeName(field_cls) is not None:
raise ValueError('Cannot have Optional structured fields.'
'(e.g. Optional[List or Set or Dict])')
# Optional[...]
self._check(field_cls.__args__[0], depth)
else:
raise ValueError(f'Invalid origin class for {field_cls}: '
f'`{field_cls.__origin__}`')
else:
checker = DataclassChecker(field_cls)
if checker.check_is_dataclass() is not None:
raise ValueError(
f'Invalid type surfaced for field `{self.field_name}`: '
f'`{self.field_cls}` - {field_cls} is not acceptable')
err = checker.check()
if err:
errors = '; '.join(err)
raise ValueError(
f'Subfield entity class of field `{self.field_name}` '
f'({field_cls}) has type errors: {errors}')
self.checked.add(field_cls)
return True
def check(self):
return self._check(self.field_cls, 0)
class DataclassChecker:
"""Checks if a python type and its structure conforms to Dataclass specs."""
def __init__(self, cls: type):
self.cls = cls
self.nested = []
def _err_class(self):
return f'dataclass class `{self.cls}` in module `{self.cls.__module__}`'
def _err_field(self, field: str):
return (f'field `{field}` of dataclass class `{self.cls.__name__}` '
f'in module `{self.cls.__module__}`')
def check_is_dataclass(self):
if not dataclasses.is_dataclass(self.cls):
return f'{self._err_class()} is not a dataclass'
return None
def _check_type(self, field_name, field_cls):
try:
FieldTypeChecker(field_name, field_cls).check()
return None
except ValueError as e:
return f'{e.args[0]} for {self._err_field(field_name)}'
def _check_field_type(self, field_name, field_cls):
return self._check_type(GetOriginalType(field_name), field_cls)
def _check_dataclass_members(self):
err = []
for key in self.cls.__dict__:
# pylint: disable=comparison-with-callable,unidiomatic-typecheck
if type(self.cls.__dict__[key]) == type:
self.nested.append(
(key, DataclassChecker(self.cls.__dict__[key])))
elif callable(
self.cls.__dict__[key]) and key not in _EXPECTED_FUNCTIONS:
err.append(f'{self._err_class()} has unexpected function '
f'member `{key}`')
elif (key not in _EXPECTED_DICT_KEYS and
key not in _EXPECTED_FUNCTIONS and
key not in self.cls.__annotations__):
err.append(f'{self._err_class()} has unexpected / non annotated'
f' member `{key}`: {self.cls.__dict__[key]}')
for field in dataclasses.fields(self.cls):
field_err = self._check_field_type(field.name, field.type)
if field_err is not None:
err.append(field_err)
for nested in self.nested:
for nested_err in nested[1].check():
err.append(f'{nested_err}; for nested sub-class '
f'{nested[0]} of {self._err_class()}')
return err
def check(self):
err_dataclass = self.check_is_dataclass()
if err_dataclass is not None:
return [err_dataclass]
return self._check_dataclass_members()
def SchemaAnnotations(cls: type):
"""Returns the schema annotations of a type."""
annotations = []
if hasattr(cls, _SCHEMA_ANNOTATIONS):
annotations.extend(cls.__schema_annotations__)
return annotations
| en | 0.740604 | # # nuna_sql_tools: Copyright 2022 Nuna Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Utilityes for checking and. # In your data declaration python modules define a JAVA_PACKAGE # variable at top level to specify the corresponding Java package of generated # classes. Annotates a class or a type. `annotation` should from annotation.py Annotates a field type with the provided annotation. If provided field_cls is an annotated type. Returns the original type behind the annotation (if any). If the field_cls looks like an Optional[...] type. # pylint: disable=comparison-with-callable Returns the type of optional & annotation or None if not optional. Returns the type of field_cls, behind annotations and Optional. Returns the structure type name for a type, behind annotation. If the type field_cls looks like one of the basic field types. Checks the type of a fields in a dataclass. Check if the type of a field is acceptable. # pylint: disable=comparison-with-callable # Optional[...] Checks if a python type and its structure conforms to Dataclass specs. # pylint: disable=comparison-with-callable,unidiomatic-typecheck Returns the schema annotations of a type. | 2.023992 | 2 |
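A minimal usage sketch for the checker defined above (the dataclass is made up for illustration; a real declaration module would also set JAVA_PACKAGE at top level):
import dataclasses
from typing import Optional

@dataclasses.dataclass
class Member:
    member_id: int
    name: Optional[str] = None

errors = DataclassChecker(Member).check()
print(errors)  # expected [] for a well-formed dataclass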
Data_and_Dicts.py | melkisedeath/Harmonic_Analysis_and_Trajectory | 0 | 9492 | <filename>Data_and_Dicts.py
"""HERE are the base Points for all valid Tonnetze Systems.
A period of all 12 notes divided by mod 3, mod 4 (always stable)
"""
# x = 4, y = 3
NotePointsT345 = {
0: (0, 0),
1: (1, 3),
2: (2, 2),
3: (0, 1),
4: (1, 0),
5: (2, 3),
6: (0, 2),
7: (1, 1),
8: (2, 0),
9: (0, 3),
10: (1, 2),
11: (2, 1)
}
# x = 8, y = 3
NotePointsT138 = {
0: (0, 0),
1: (2, 3),
2: (1, 2),
3: (0, 1),
4: (2, 0),
5: (1, 3),
6: (0, 2),
7: (2, 1),
8: (1, 0),
9: (0, 3),
10: (2, 2),
11: (1, 1)
}
# x = 2, y = 9
NotePointsT129 = {
0: (0, 0),
1: (2, 1),
2: (1, 0),
3: (0, 3),
4: (2, 0),
5: (1, 3),
6: (0, 2),
7: (2, 3),
8: (1, 2),
9: (0, 1),
10: (2, 2),
11: (1, 1)
}
# x = 4, y = 1
NotePointsT147 = {
0: (0, 0),
1: (0, 1),
2: (0, 2),
3: (0, 3),
4: (1, 0),
5: (1, 1),
6: (1, 2),
7: (1, 3),
8: (2, 0),
9: (2, 1),
10: (2, 2),
11: (2, 3)
}
# x = 2, y = 3
NotePointsT237 = {
0: (0, 0),
1: (2, 3),
2: (1, 0),
3: (0, 1),
4: (2, 0),
5: (1, 1),
6: (0, 2),
7: (2, 1),
8: (1, 2),
9: (0, 3),
10: (2, 2),
11: (1, 3)
}
dictOfTonnetz = {
'T345': NotePointsT345,
'T147': NotePointsT147,
'T138': NotePointsT138,
'T237': NotePointsT237,
'T129': NotePointsT129
}
dictOfTonnetze = {
'T129': [1, 2, 9],
'T138': [1, 3, 8],
'T147': [1, 4, 7],
'T156': [1, 5, 6],
'T237': [2, 3, 7],
'T345': [3, 4, 5]
}
| <filename>Data_and_Dicts.py
"""HERE are the base Points for all valid Tonnetze Systems.
A period of all 12 notes divided by mod 3, mod 4 (always stable)
"""
# x = 4, y = 3
NotePointsT345 = {
0: (0, 0),
1: (1, 3),
2: (2, 2),
3: (0, 1),
4: (1, 0),
5: (2, 3),
6: (0, 2),
7: (1, 1),
8: (2, 0),
9: (0, 3),
10: (1, 2),
11: (2, 1)
}
# x = 8, y = 3
NotePointsT138 = {
0: (0, 0),
1: (2, 3),
2: (1, 2),
3: (0, 1),
4: (2, 0),
5: (1, 3),
6: (0, 2),
7: (2, 1),
8: (1, 0),
9: (0, 3),
10: (2, 2),
11: (1, 1)
}
# x = 2, y = 9
NotePointsT129 = {
0: (0, 0),
1: (2, 1),
2: (1, 0),
3: (0, 3),
4: (2, 0),
5: (1, 3),
6: (0, 2),
7: (2, 3),
8: (1, 2),
9: (0, 1),
10: (2, 2),
11: (1, 1)
}
# x = 4, y = 1
NotePointsT147 = {
0: (0, 0),
1: (0, 1),
2: (0, 2),
3: (0, 3),
4: (1, 0),
5: (1, 1),
6: (1, 2),
7: (1, 3),
8: (2, 0),
9: (2, 1),
10: (2, 2),
11: (2, 3)
}
# x = 2, y = 3
NotePointsT237 = {
0: (0, 0),
1: (2, 3),
2: (1, 0),
3: (0, 1),
4: (2, 0),
5: (1, 1),
6: (0, 2),
7: (2, 1),
8: (1, 2),
9: (0, 3),
10: (2, 2),
11: (1, 3)
}
dictOfTonnetz = {
'T345': NotePointsT345,
'T147': NotePointsT147,
'T138': NotePointsT138,
'T237': NotePointsT237,
'T129': NotePointsT129
}
dictOfTonnetze = {
'T129': [1, 2, 9],
'T138': [1, 3, 8],
'T147': [1, 4, 7],
'T156': [1, 5, 6],
'T237': [2, 3, 7],
'T345': [3, 4, 5]
}
| en | 0.844549 | HERE are the base Points for all valid Tonnetze Systems. A period of all 12 notes divided by mod 3, mod 4 (always stable) # x = 4, y = 3 # x = 8, y = 3 # x = 2, y = 9 # x = 4, y = 1 # x = 2, y = 3 | 2.267211 | 2 |
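A small sketch of how these tables might be used to look up a pitch class's grid position (the helper function is illustrative, not from the repository):
def note_coordinates(pitch_class, tonnetz='T345'):
    """Return the (x, y) base point of a pitch class (0-11) in the chosen Tonnetz."""
    return dictOfTonnetz[tonnetz][pitch_class % 12]

print(note_coordinates(7))          # (1, 1) in T345
print(note_coordinates(7, 'T147'))  # (1, 3) in T147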
awacs/proton.py | alanjjenkins/awacs | 0 | 9493 | <reponame>alanjjenkins/awacs
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Proton"
prefix = "proton"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateEnvironment = Action("CreateEnvironment")
CreateEnvironmentTemplate = Action("CreateEnvironmentTemplate")
CreateEnvironmentTemplateMajorVersion = Action("CreateEnvironmentTemplateMajorVersion")
CreateEnvironmentTemplateMinorVersion = Action("CreateEnvironmentTemplateMinorVersion")
CreateService = Action("CreateService")
CreateServiceTemplate = Action("CreateServiceTemplate")
CreateServiceTemplateMajorVersion = Action("CreateServiceTemplateMajorVersion")
CreateServiceTemplateMinorVersion = Action("CreateServiceTemplateMinorVersion")
DeleteAccountRoles = Action("DeleteAccountRoles")
DeleteEnvironment = Action("DeleteEnvironment")
DeleteEnvironmentTemplate = Action("DeleteEnvironmentTemplate")
DeleteEnvironmentTemplateMajorVersion = Action("DeleteEnvironmentTemplateMajorVersion")
DeleteEnvironmentTemplateMinorVersion = Action("DeleteEnvironmentTemplateMinorVersion")
DeleteService = Action("DeleteService")
DeleteServiceTemplate = Action("DeleteServiceTemplate")
DeleteServiceTemplateMajorVersion = Action("DeleteServiceTemplateMajorVersion")
DeleteServiceTemplateMinorVersion = Action("DeleteServiceTemplateMinorVersion")
GetAccountRoles = Action("GetAccountRoles")
GetEnvironment = Action("GetEnvironment")
GetEnvironmentTemplate = Action("GetEnvironmentTemplate")
GetEnvironmentTemplateMajorVersion = Action("GetEnvironmentTemplateMajorVersion")
GetEnvironmentTemplateMinorVersion = Action("GetEnvironmentTemplateMinorVersion")
GetService = Action("GetService")
GetServiceInstance = Action("GetServiceInstance")
GetServiceTemplate = Action("GetServiceTemplate")
GetServiceTemplateMajorVersion = Action("GetServiceTemplateMajorVersion")
GetServiceTemplateMinorVersion = Action("GetServiceTemplateMinorVersion")
ListEnvironmentTemplateMajorVersions = Action("ListEnvironmentTemplateMajorVersions")
ListEnvironmentTemplateMinorVersions = Action("ListEnvironmentTemplateMinorVersions")
ListEnvironmentTemplates = Action("ListEnvironmentTemplates")
ListEnvironments = Action("ListEnvironments")
ListServiceInstances = Action("ListServiceInstances")
ListServiceTemplateMajorVersions = Action("ListServiceTemplateMajorVersions")
ListServiceTemplateMinorVersions = Action("ListServiceTemplateMinorVersions")
ListServiceTemplates = Action("ListServiceTemplates")
ListServices = Action("ListServices")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateAccountRoles = Action("UpdateAccountRoles")
UpdateEnvironment = Action("UpdateEnvironment")
UpdateEnvironmentTemplate = Action("UpdateEnvironmentTemplate")
UpdateEnvironmentTemplateMajorVersion = Action("UpdateEnvironmentTemplateMajorVersion")
UpdateEnvironmentTemplateMinorVersion = Action("UpdateEnvironmentTemplateMinorVersion")
UpdateService = Action("UpdateService")
UpdateServiceInstance = Action("UpdateServiceInstance")
UpdateServicePipeline = Action("UpdateServicePipeline")
UpdateServiceTemplate = Action("UpdateServiceTemplate")
UpdateServiceTemplateMajorVersion = Action("UpdateServiceTemplateMajorVersion")
UpdateServiceTemplateMinorVersion = Action("UpdateServiceTemplateMinorVersion")
| # Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Proton"
prefix = "proton"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateEnvironment = Action("CreateEnvironment")
CreateEnvironmentTemplate = Action("CreateEnvironmentTemplate")
CreateEnvironmentTemplateMajorVersion = Action("CreateEnvironmentTemplateMajorVersion")
CreateEnvironmentTemplateMinorVersion = Action("CreateEnvironmentTemplateMinorVersion")
CreateService = Action("CreateService")
CreateServiceTemplate = Action("CreateServiceTemplate")
CreateServiceTemplateMajorVersion = Action("CreateServiceTemplateMajorVersion")
CreateServiceTemplateMinorVersion = Action("CreateServiceTemplateMinorVersion")
DeleteAccountRoles = Action("DeleteAccountRoles")
DeleteEnvironment = Action("DeleteEnvironment")
DeleteEnvironmentTemplate = Action("DeleteEnvironmentTemplate")
DeleteEnvironmentTemplateMajorVersion = Action("DeleteEnvironmentTemplateMajorVersion")
DeleteEnvironmentTemplateMinorVersion = Action("DeleteEnvironmentTemplateMinorVersion")
DeleteService = Action("DeleteService")
DeleteServiceTemplate = Action("DeleteServiceTemplate")
DeleteServiceTemplateMajorVersion = Action("DeleteServiceTemplateMajorVersion")
DeleteServiceTemplateMinorVersion = Action("DeleteServiceTemplateMinorVersion")
GetAccountRoles = Action("GetAccountRoles")
GetEnvironment = Action("GetEnvironment")
GetEnvironmentTemplate = Action("GetEnvironmentTemplate")
GetEnvironmentTemplateMajorVersion = Action("GetEnvironmentTemplateMajorVersion")
GetEnvironmentTemplateMinorVersion = Action("GetEnvironmentTemplateMinorVersion")
GetService = Action("GetService")
GetServiceInstance = Action("GetServiceInstance")
GetServiceTemplate = Action("GetServiceTemplate")
GetServiceTemplateMajorVersion = Action("GetServiceTemplateMajorVersion")
GetServiceTemplateMinorVersion = Action("GetServiceTemplateMinorVersion")
ListEnvironmentTemplateMajorVersions = Action("ListEnvironmentTemplateMajorVersions")
ListEnvironmentTemplateMinorVersions = Action("ListEnvironmentTemplateMinorVersions")
ListEnvironmentTemplates = Action("ListEnvironmentTemplates")
ListEnvironments = Action("ListEnvironments")
ListServiceInstances = Action("ListServiceInstances")
ListServiceTemplateMajorVersions = Action("ListServiceTemplateMajorVersions")
ListServiceTemplateMinorVersions = Action("ListServiceTemplateMinorVersions")
ListServiceTemplates = Action("ListServiceTemplates")
ListServices = Action("ListServices")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateAccountRoles = Action("UpdateAccountRoles")
UpdateEnvironment = Action("UpdateEnvironment")
UpdateEnvironmentTemplate = Action("UpdateEnvironmentTemplate")
UpdateEnvironmentTemplateMajorVersion = Action("UpdateEnvironmentTemplateMajorVersion")
UpdateEnvironmentTemplateMinorVersion = Action("UpdateEnvironmentTemplateMinorVersion")
UpdateService = Action("UpdateService")
UpdateServiceInstance = Action("UpdateServiceInstance")
UpdateServicePipeline = Action("UpdateServicePipeline")
UpdateServiceTemplate = Action("UpdateServiceTemplate")
UpdateServiceTemplateMajorVersion = Action("UpdateServiceTemplateMajorVersion")
UpdateServiceTemplateMinorVersion = Action("UpdateServiceTemplateMinorVersion") | en | 0.745275 | # Copyright (c) 2012-2021, <NAME> <<EMAIL>> # All rights reserved. # # See LICENSE file for full license. | 2.199102 | 2 |
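A short sketch of how these action constants are typically combined into a policy with the rest of awacs (the statement contents are illustrative only):
from awacs.aws import Allow, Policy, Statement
from awacs import proton

policy = Policy(
    Version="2012-10-17",
    Statement=[
        Statement(
            Effect=Allow,
            Action=[proton.GetService, proton.ListServices],
            Resource=[proton.ARN(resource="*", region="*", account="*")],
        )
    ],
)
print(policy.to_json())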
src/error.py | LydiaMelles/relativum | 0 | 9494 | class RequirementsNotMetError(Exception):
"""For SQL INSERT, missing table attributes."""
def __init__(self, message):
super().__init__(message)
class AuthenticationError(Exception):
"""Generic authentication error."""
def __init__(self, message):
super().__init__(message)
| class RequirementsNotMetError(Exception):
"""For SQL INSERT, missing table attributes."""
def __init__(self, message):
super().__init__(message)
class AuthenticationError(Exception):
"""Generic authentication error."""
def __init__(self, message):
super().__init__(message)
| en | 0.295452 | For SQL INSERT, missing table attributes. Generic authentication error. | 2.772172 | 3 |
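A brief sketch of how these exceptions might be used by calling code (the insert helper is hypothetical):
def insert_row(table, values, required=("id", "name")):
    # Hypothetical caller: validate required attributes before building the INSERT.
    missing = [col for col in required if col not in values]
    if missing:
        raise RequirementsNotMetError(f"Missing attributes: {', '.join(missing)}")
    # ... build and execute the INSERT here ...

try:
    insert_row("member", {"id": 1})
except RequirementsNotMetError as exc:
    print(exc)  # Missing attributes: name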
jaxline/utils_test.py | lorenrose1013/jaxline | 1 | 9495 | <filename>jaxline/utils_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jaxline's utils."""
import functools
import itertools as it
import time
from unittest import mock
from absl.testing import absltest
from absl.testing import flagsaver
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
class PyPrefetchTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(list(utils.py_prefetch(lambda: ())), [])
def testBaseCase(self):
self.assertEqual(list(utils.py_prefetch(lambda: range(100))),
list(range(100)))
def testBadFunction(self):
def _bad_function():
raise ValueError
iterable = utils.py_prefetch(_bad_function)
with self.assertRaises(ValueError):
next(iterable)
def testBadFunctionIteration(self):
def _bad_iterable():
yield 1
raise ValueError
iterable = utils.py_prefetch(_bad_iterable)
self.assertEqual(next(iterable), 1)
with self.assertRaises(ValueError):
next(iterable)
class TreePsumTest(absltest.TestCase):
def testBaseCase(self):
# pick leaf objects with leading dimension one as these tests will
# be run on a single device.
data = {"a": jnp.array([1]), "b": jnp.array([2])}
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testEmpty(self):
data = {"a": jnp.array([]), "b": jnp.array([])}
with self.assertRaises(ZeroDivisionError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testSingleLeafTree(self):
data = jnp.array([1])
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testNotNumpy(self):
data = [1]
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNumDevicesMismatch(self):
data = jnp.array([1, 2]) # assumes 2 devices but we only have 1
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNoPmapWrapper(self):
with self.assertRaises(NameError): # axis_name will be undefined
utils.tree_psum(jnp.array([1]), axis_name="i")
def testAxisNameMismatch(self):
data = jnp.array([1])
with self.assertRaises(NameError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="j")(data)
class MakeAsyncTest(absltest.TestCase):
def testBaseCase(self):
"""Tests correct execution for single call."""
r = []
async_fn = utils.make_async()(lambda: r.append("a"))
async_fn()
time.sleep(1)
self.assertListEqual(r, ["a"])
def testNonBlocking(self):
"""Tests async function doesn't block the main thread."""
r = []
async_fn = utils.make_async()(lambda: r.append((time.sleep(5), "a")))
r.append((None, "b"))
async_fn().result()
self.assertListEqual(r, [(None, "b"), (None, "a")])
def testSerialExecution(self):
"""Tests multiple calls to async function execute serially."""
r = []
a = lambda: r.append((time.sleep(5), "a"))
b = lambda: r.append((None, "b"))
async_fn = utils.make_async()(lambda f: f())
async_fn(a)
async_fn(b).result()
self.assertListEqual(r, [(None, "a"), (None, "b")])
def testErrorOnNextCall(self):
"""Tests background thread error raised in main thread on next call."""
@utils.make_async()
def async_fn():
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on next call
async_fn()
def testSubsequentCallsDontRun(self):
"""Tests that subsequent calls don't run after an error has occurred."""
runs = []
@utils.make_async()
def async_fn():
runs.append(None)
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
for _ in range(2):
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on
# subsequent calls and _bad_function will not be run.
async_fn()
self.assertListEqual(runs, [None])
def testErrorInBackgroundThread(self):
"""Tests background thread raises the error."""
@utils.make_async()
def async_fn():
raise ValueError()
future = async_fn() # pylint: disable=assignment-from-no-return
self.assertIsNotNone(future.exception())
class TestBroadcast(absltest.TestCase):
def test_bcast_local_devices(self):
self.assertEqual(utils.bcast_local_devices(jnp.zeros([])),
jnp.zeros([jax.local_device_count()]))
self.assertEqual(utils.bcast_local_devices(jnp.ones([])),
jnp.ones([jax.local_device_count()]))
def test_bcast_local_devices_empty_tree(self):
self.assertIsNone(utils.bcast_local_devices(None))
self.assertEqual(utils.bcast_local_devices({}), {})
def test_bcast_local_devices_tree(self):
num_devices = jax.local_device_count()
tree = utils.bcast_local_devices({"ones": jnp.ones([]),
"zeros": jnp.zeros([])})
self.assertEqual(tree, {"ones": jnp.ones([num_devices]),
"zeros": jnp.zeros([num_devices])})
class TestLogActivity(absltest.TestCase):
@mock.patch("jaxline.utils.logging.info")
def test_log_success(self, mock_info):
"""Tests that logging an activity is successful."""
with utils.log_activity("for test"):
pass
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_info.assert_any_call("[jaxline] %s finished.", "for test")
@mock.patch("absl.logging.exception")
@mock.patch("absl.logging.info")
def test_log_failure(self, mock_info, mock_exc):
"""Tests that an error thrown by an activity is correctly caught."""
with self.assertRaisesRegex(ValueError, "Intentional"):
with utils.log_activity("for test"):
raise ValueError("Intentional")
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_exc.assert_any_call("[jaxline] %s failed with error.", "for test")
class TestSpecializeRngHostDevice(absltest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSpecializeRngHostDevice, cls).setUpClass()
rng = jax.random.PRNGKey(0)
cls.rng = jnp.broadcast_to(
rng, (jax.local_device_count(),) + rng.shape)
def test_unique_device(self):
"""Tests that rngs are unique across devices."""
mode = "unique_host_unique_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(rng, axis=0).shape[0], jax.local_device_count())
def test_same_device(self):
"""Tests rngs are same across devices."""
mode = "unique_host_same_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(rng, axis=0).shape[0], 1)
def test_unique_host(self):
"""Tests rngs unique between hosts."""
mode = "unique_host_same_device"
with mock.patch.object(utils.jax, "host_id", return_value=0):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng0 = specialize_func(self.rng, host_id_devices)
with mock.patch.object(utils.jax, "host_id", return_value=1):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng1 = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(np.concatenate([rng0, rng1], axis=0), axis=0).shape[0], 2)
class TestRendezvous(absltest.TestCase):
def test_rendezvous(self):
"""Test that rendezvous doesn't fail."""
utils.rendezvous()
class TestJaxlineDisablePmapJit(absltest.TestCase):
@mock.patch.object(utils.chex, "fake_pmap_and_jit", autospec=True)
def test_pmap_jit_disabled(self, mock_fake_pmap_and_jit):
"""Tests pmap/jit are disabled if --jaxline_disable_pmap_jit is set."""
with self.subTest("PmapJitNotDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=False):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_not_called()
with self.subTest("PmapJitDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=True):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_called_once()
class DoubleBufferTest(absltest.TestCase):
def test_double_buffer(self):
if jax.default_backend() != "gpu":
self.skipTest("Only necessary on GPU.")
n = jax.local_device_count()
dataset = it.repeat(np.ones([n]))
iterator = iter(utils.double_buffer(dataset))
batch_ptrs = []
while len(batch_ptrs) < 4:
batch = next(iterator)
ptrs = [b.unsafe_buffer_pointer() for b in batch.device_buffers]
batch_ptrs.append(ptrs)
del batch
self.assertEqual(batch_ptrs[0], batch_ptrs[2])
self.assertEqual(batch_ptrs[1], batch_ptrs[3])
self.assertNotEqual(batch_ptrs[0], batch_ptrs[1])
self.assertNotEqual(batch_ptrs[2], batch_ptrs[3])
if __name__ == "__main__":
absltest.main()
| <filename>jaxline/utils_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jaxline's utils."""
import functools
import itertools as it
import time
from unittest import mock
from absl.testing import absltest
from absl.testing import flagsaver
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
class PyPrefetchTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(list(utils.py_prefetch(lambda: ())), [])
def testBaseCase(self):
self.assertEqual(list(utils.py_prefetch(lambda: range(100))),
list(range(100)))
def testBadFunction(self):
def _bad_function():
raise ValueError
iterable = utils.py_prefetch(_bad_function)
with self.assertRaises(ValueError):
next(iterable)
def testBadFunctionIteration(self):
def _bad_iterable():
yield 1
raise ValueError
iterable = utils.py_prefetch(_bad_iterable)
self.assertEqual(next(iterable), 1)
with self.assertRaises(ValueError):
next(iterable)
class TreePsumTest(absltest.TestCase):
def testBaseCase(self):
# pick leaf objects with leading dimension one as these tests will
# be run on a single device.
data = {"a": jnp.array([1]), "b": jnp.array([2])}
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testEmpty(self):
data = {"a": jnp.array([]), "b": jnp.array([])}
with self.assertRaises(ZeroDivisionError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testSingleLeafTree(self):
data = jnp.array([1])
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testNotNumpy(self):
data = [1]
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNumDevicesMismatch(self):
data = jnp.array([1, 2]) # assumes 2 devices but we only have 1
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNoPmapWrapper(self):
with self.assertRaises(NameError): # axis_name will be undefined
utils.tree_psum(jnp.array([1]), axis_name="i")
def testAxisNameMismatch(self):
data = jnp.array([1])
with self.assertRaises(NameError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="j")(data)
class MakeAsyncTest(absltest.TestCase):
def testBaseCase(self):
"""Tests correct execution for single call."""
r = []
async_fn = utils.make_async()(lambda: r.append("a"))
async_fn()
time.sleep(1)
self.assertListEqual(r, ["a"])
def testNonBlocking(self):
"""Tests async function doesn't block the main thread."""
r = []
async_fn = utils.make_async()(lambda: r.append((time.sleep(5), "a")))
r.append((None, "b"))
async_fn().result()
self.assertListEqual(r, [(None, "b"), (None, "a")])
def testSerialExecution(self):
"""Tests multiple calls to async function execute serially."""
r = []
a = lambda: r.append((time.sleep(5), "a"))
b = lambda: r.append((None, "b"))
async_fn = utils.make_async()(lambda f: f())
async_fn(a)
async_fn(b).result()
self.assertListEqual(r, [(None, "a"), (None, "b")])
def testErrorOnNextCall(self):
"""Tests background thread error raised in main thread on next call."""
@utils.make_async()
def async_fn():
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on next call
async_fn()
def testSubsequentCallsDontRun(self):
"""Tests that subsequent calls don't run after an error has occurred."""
runs = []
@utils.make_async()
def async_fn():
runs.append(None)
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
for _ in range(2):
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on
# subsequent calls and _bad_function will not be run.
async_fn()
self.assertListEqual(runs, [None])
def testErrorInBackgroundThread(self):
"""Tests background thread raises the error."""
@utils.make_async()
def async_fn():
raise ValueError()
future = async_fn() # pylint: disable=assignment-from-no-return
self.assertIsNotNone(future.exception())
class TestBroadcast(absltest.TestCase):
def test_bcast_local_devices(self):
self.assertEqual(utils.bcast_local_devices(jnp.zeros([])),
jnp.zeros([jax.local_device_count()]))
self.assertEqual(utils.bcast_local_devices(jnp.ones([])),
jnp.ones([jax.local_device_count()]))
def test_bcast_local_devices_empty_tree(self):
self.assertIsNone(utils.bcast_local_devices(None))
self.assertEqual(utils.bcast_local_devices({}), {})
def test_bcast_local_devices_tree(self):
num_devices = jax.local_device_count()
tree = utils.bcast_local_devices({"ones": jnp.ones([]),
"zeros": jnp.zeros([])})
self.assertEqual(tree, {"ones": jnp.ones([num_devices]),
"zeros": jnp.zeros([num_devices])})
class TestLogActivity(absltest.TestCase):
@mock.patch("jaxline.utils.logging.info")
def test_log_success(self, mock_info):
"""Tests that logging an activity is successful."""
with utils.log_activity("for test"):
pass
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_info.assert_any_call("[jaxline] %s finished.", "for test")
@mock.patch("absl.logging.exception")
@mock.patch("absl.logging.info")
def test_log_failure(self, mock_info, mock_exc):
"""Tests that an error thrown by an activity is correctly caught."""
with self.assertRaisesRegex(ValueError, "Intentional"):
with utils.log_activity("for test"):
raise ValueError("Intentional")
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_exc.assert_any_call("[jaxline] %s failed with error.", "for test")
class TestSpecializeRngHostDevice(absltest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSpecializeRngHostDevice, cls).setUpClass()
rng = jax.random.PRNGKey(0)
cls.rng = jnp.broadcast_to(
rng, (jax.local_device_count(),) + rng.shape)
def test_unique_device(self):
"""Tests that rngs are unique across devices."""
mode = "unique_host_unique_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(rng, axis=0).shape[0], jax.local_device_count())
def test_same_device(self):
"""Tests rngs are same across devices."""
mode = "unique_host_same_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(rng, axis=0).shape[0], 1)
def test_unique_host(self):
"""Tests rngs unique between hosts."""
mode = "unique_host_same_device"
with mock.patch.object(utils.jax, "host_id", return_value=0):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng0 = specialize_func(self.rng, host_id_devices)
with mock.patch.object(utils.jax, "host_id", return_value=1):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng1 = specialize_func(self.rng, host_id_devices)
self.assertEqual(
np.unique(np.concatenate([rng0, rng1], axis=0), axis=0).shape[0], 2)
class TestRendezvous(absltest.TestCase):
def test_rendezvous(self):
"""Test that rendezvous doesn't fail."""
utils.rendezvous()
class TestJaxlineDisablePmapJit(absltest.TestCase):
@mock.patch.object(utils.chex, "fake_pmap_and_jit", autospec=True)
def test_pmap_jit_disabled(self, mock_fake_pmap_and_jit):
"""Tests pmap/jit are disabled if --jaxline_disable_pmap_jit is set."""
with self.subTest("PmapJitNotDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=False):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_not_called()
with self.subTest("PmapJitDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=True):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_called_once()
class DoubleBufferTest(absltest.TestCase):
def test_double_buffer(self):
if jax.default_backend() != "gpu":
self.skipTest("Only necessary on GPU.")
n = jax.local_device_count()
dataset = it.repeat(np.ones([n]))
iterator = iter(utils.double_buffer(dataset))
batch_ptrs = []
while len(batch_ptrs) < 4:
batch = next(iterator)
ptrs = [b.unsafe_buffer_pointer() for b in batch.device_buffers]
batch_ptrs.append(ptrs)
del batch
self.assertEqual(batch_ptrs[0], batch_ptrs[2])
self.assertEqual(batch_ptrs[1], batch_ptrs[3])
self.assertNotEqual(batch_ptrs[0], batch_ptrs[1])
self.assertNotEqual(batch_ptrs[2], batch_ptrs[3])
if __name__ == "__main__":
absltest.main()
| en | 0.892031 | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for jaxline's utils. # pick leaf objects with leading dimension one as these tests will # be run on a single device. # assumes 2 devices but we only have 1 # axis_name will be undefined Tests correct execution for single call. Tests async function doesn't block the main thread. Tests multiple calls to async function execute serially. Tests background thread error raised in main thread on next call. # First call will trigger an error in the background thread. # Background thread error will be raised in the main thread on next call Tests that subsequent calls don't run after an error has occurred. # First call will trigger an error in the background thread. # Background thread error will be raised in the main thread on # subsequent calls and _bad_function will not be run. Tests background thread raises the error. # pylint: disable=assignment-from-no-return Tests that logging an activity is successful. Tests that an error thrown by an activity is correctly caught. Tests that rngs are unique across devices. Tests rngs are same across devices. Tests rngs unique between hosts. Test that rendezvous doesn't fail. Tests pmap/jit are disabled if --jaxline_disable_pmap_jit is set. | 2.579058 | 3 |
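A small sketch of the usage pattern the jaxline tests above imply for py_prefetch and make_async; it relies only on behaviour those tests assert, and the function name and step value are placeholders:

from jaxline import utils

# py_prefetch takes a zero-argument callable returning an iterable and yields
# its items; the tests assert it preserves order and re-raises iterator errors.
for step in utils.py_prefetch(lambda: range(5)):
    print("prefetched", step)

# make_async() wraps a function so it runs off the main thread and returns a
# future; the tests assert calls execute serially and errors resurface later.
@utils.make_async()
def log_metrics(step):
    print("logging step", step)

future = log_metrics(0)
future.result()  # block until done; re-raises any background-thread error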
test/unit/mysql_class/slaverep_isslverror.py | deepcoder42/mysql-lib | 1 | 9496 | #!/usr/bin/python
# Classification (U)
"""Program: slaverep_isslverror.py
Description: Unit testing of SlaveRep.is_slv_error in mysql_class.py.
Usage:
test/unit/mysql_class/slaverep_isslverror.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import lib.machine as machine
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_slv_both_true -> Test with all attrs set to True.
test_sql_err_true -> Test with sql_err set to True.
test_io_err_true -> Test with io_err set to True.
        test_default -> Test is_slv_error method.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = "Mysql_Server"
self.server_id = 10
self.sql_user = "mysql_user"
self.sql_pass = "<PASSWORD>"
self.machine = getattr(machine, "Linux")()
self.host = "host_server"
self.port = 3307
self.defaults_file = "def_cfg_file"
self.extra_def_file = "extra_cfg_file"
def test_slv_both_true(self):
"""Function: test_slv_both_true
Description: Test with all attrs set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = "Yes"
mysqlrep.io_err = "Yes"
self.assertTrue(mysqlrep.is_slv_error())
def test_sql_err_true(self):
"""Function: test_sql_err_true
Description: Test with sql_err set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = "Yes"
mysqlrep.io_err = None
self.assertTrue(mysqlrep.is_slv_error())
def test_io_err_true(self):
"""Function: test_io_err_true
Description: Test with io_err set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = None
mysqlrep.io_err = "Yes"
self.assertTrue(mysqlrep.is_slv_error())
def test_default(self):
"""Function: test_default
Description: Test is_slv_error method.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = None
mysqlrep.io_err = None
self.assertFalse(mysqlrep.is_slv_error())
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/python
# Classification (U)
"""Program: slaverep_isslverror.py
Description: Unit testing of SlaveRep.is_slv_error in mysql_class.py.
Usage:
test/unit/mysql_class/slaverep_isslverror.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import lib.machine as machine
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_slv_both_true -> Test with all attrs set to True.
test_sql_err_true -> Test with sql_err set to True.
test_io_err_true -> Test with io_err set to True.
        test_default -> Test is_slv_error method.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = "Mysql_Server"
self.server_id = 10
self.sql_user = "mysql_user"
self.sql_pass = "<PASSWORD>"
self.machine = getattr(machine, "Linux")()
self.host = "host_server"
self.port = 3307
self.defaults_file = "def_cfg_file"
self.extra_def_file = "extra_cfg_file"
def test_slv_both_true(self):
"""Function: test_slv_both_true
Description: Test with all attrs set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = "Yes"
mysqlrep.io_err = "Yes"
self.assertTrue(mysqlrep.is_slv_error())
def test_sql_err_true(self):
"""Function: test_sql_err_true
Description: Test with sql_err set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = "Yes"
mysqlrep.io_err = None
self.assertTrue(mysqlrep.is_slv_error())
def test_io_err_true(self):
"""Function: test_io_err_true
Description: Test with io_err set to True.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = None
mysqlrep.io_err = "Yes"
self.assertTrue(mysqlrep.is_slv_error())
def test_default(self):
"""Function: test_default
Description: Test is_slv_error method.
Arguments:
"""
mysqlrep = mysql_class.SlaveRep(self.name, self.server_id,
self.sql_user, self.sql_pass,
self.machine,
defaults_file=self.defaults_file)
mysqlrep.sql_err = None
mysqlrep.io_err = None
self.assertFalse(mysqlrep.is_slv_error())
if __name__ == "__main__":
unittest.main()
| en | 0.576658 | #!/usr/bin/python # Classification (U) Program: slaverep_isslverror.py Description: Unit testing of SlaveRep.is_slv_error in mysql_class.py. Usage: test/unit/mysql_class/slaverep_isslverror.py Arguments: # Libraries and Global Variables # Standard # Third-party # Local Class: UnitTest Description: Class which is a representation of a unit testing. Methods: setUp -> Initialize testing environment. test_slv_both_true -> Test with all attrs set to True. test_sql_err_true -> Test with sql_err set to True. test_io_err_true -> Test with io_err set to True. test_default -> Test show_slv_state method. Function: setUp Description: Initialization for unit testing. Arguments: Function: test_slv_both_true Description: Test with all attrs set to True. Arguments: Function: test_sql_err_true Description: Test with sql_err set to True. Arguments: Function: test_io_err_true Description: Test with io_err set to True. Arguments: Function: test_default Description: Test is_slv_error method. Arguments: | 2.582985 | 3 |
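Condensed, the four tests in the row above pin SlaveRep.is_slv_error down to a truthiness check over the two error attributes; the standalone function below illustrates that contract and is not the mysql_class implementation:

def is_slv_error(sql_err, io_err):
    # True when either replication thread reports an error.
    return bool(sql_err or io_err)

assert is_slv_error("Yes", "Yes") is True
assert is_slv_error("Yes", None) is True
assert is_slv_error(None, "Yes") is True
assert is_slv_error(None, None) is False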
problems/108.py | mengshun/Leetcode | 0 | 9497 | """
108. 将有序数组转换为二叉搜索树
"""
from TreeNode import TreeNode
class Solution:
def sortedArrayToBST(self, nums: [int]) -> TreeNode:
def dfs(left, right):
if left > right:
return None
mid = left + (right - left) // 2
root = TreeNode(nums[mid])
root.left = dfs(left, mid-1)
root.right = dfs(mid+1, right)
return root
return dfs(0, len(nums)-1)
t = [-10,-3,0,5,9]
obj = Solution()
node = obj.sortedArrayToBST(t)
node.preorderTraversal()
| """
108. 将有序数组转换为二叉搜索树
"""
from TreeNode import TreeNode
class Solution:
def sortedArrayToBST(self, nums: [int]) -> TreeNode:
def dfs(left, right):
if left > right:
return None
mid = left + (right - left) // 2
root = TreeNode(nums[mid])
root.left = dfs(left, mid-1)
root.right = dfs(mid+1, right)
return root
return dfs(0, len(nums)-1)
t = [-10,-3,0,5,9]
obj = Solution()
node = obj.sortedArrayToBST(t)
node.preorderTraversal()
| zh | 0.656805 | 108. 将有序数组转换为二叉搜索树 | 3.478027 | 3 |
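The problems/108.py row builds a height-balanced BST by always choosing the middle element as the root; the self-contained sketch below mirrors that recursion with slicing instead of index bounds, and uses a minimal TreeNode stand-in because the repository's TreeNode module is not shown (its real interface is an assumption):

class TreeNode:
    # Stand-in for the imported TreeNode; only val/left/right are assumed.
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def sorted_array_to_bst(nums):
    if not nums:
        return None
    mid = len(nums) // 2          # middle element becomes the root
    root = TreeNode(nums[mid])
    root.left = sorted_array_to_bst(nums[:mid])
    root.right = sorted_array_to_bst(nums[mid + 1:])
    return root

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

root = sorted_array_to_bst([-10, -3, 0, 5, 9])
assert inorder(root) == [-10, -3, 0, 5, 9]  # in-order traversal recovers the sorted input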
src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py | hsm207/sage | 1,742 | 9498 | <filename>src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py<gh_stars>1000+
## -*- encoding: utf-8 -*-
"""
This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./domaines_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./domaines.tex, line 10::
sage: x = var('x')
Sage example in ./domaines.tex, line 69::
sage: o = 12/35
sage: type(o)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 82::
sage: type(12/35)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 131::
sage: o = 720
sage: o.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 142::
sage: type(o).factor(o)
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 157::
sage: 720.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 166::
sage: o = 720 / 133
sage: o.numerator().factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 253::
sage: 3 * 7
21
Sage example in ./domaines.tex, line 261::
sage: (2/3) * (6/5)
4/5
Sage example in ./domaines.tex, line 267::
sage: (1 + I) * (1 - I)
2
Sage example in ./domaines.tex, line 274::
sage: (x + 2) * (x + 1)
(x + 2)*(x + 1)
sage: (x + 1) * (x + 2)
(x + 2)*(x + 1)
Sage example in ./domaines.tex, line 308::
sage: def fourth_power(a):
....: a = a * a
....: a = a * a
....: return a
Sage example in ./domaines.tex, line 330::
sage: fourth_power(2)
16
sage: fourth_power(3/2)
81/16
sage: fourth_power(I)
1
sage: fourth_power(x+1)
(x + 1)^4
sage: M = matrix([[0,-1],[1,0]]); M
[ 0 -1]
[ 1 0]
sage: fourth_power(M)
[1 0]
[0 1]
Sage example in ./domaines.tex, line 375::
sage: t = type(5/1); t
<... 'sage.rings.rational.Rational'>
sage: t == type(5)
False
Sage example in ./domaines.tex, line 476::
sage: a = 5; a
5
sage: a.is_unit()
False
Sage example in ./domaines.tex, line 484::
sage: a = 5/1; a
5
sage: a.is_unit()
True
Sage example in ./domaines.tex, line 507::
sage: parent(5)
Integer Ring
sage: parent(5/1)
Rational Field
Sage example in ./domaines.tex, line 515::
sage: ZZ
Integer Ring
sage: QQ
Rational Field
Sage example in ./domaines.tex, line 525::
sage: QQ(5).parent()
Rational Field
sage: ZZ(5/1).parent()
Integer Ring
sage: ZZ(1/5)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
Sage example in ./domaines.tex, line 543::
sage: ZZ(1), QQ(1), RR(1), CC(1)
(1, 1, 1.00000000000000, 1.00000000000000)
Sage example in ./domaines.tex, line 568::
sage: cartesian_product([QQ, QQ])
The Cartesian product of (Rational Field, Rational Field)
Sage example in ./domaines.tex, line 574::
sage: ZZ.fraction_field()
Rational Field
Sage example in ./domaines.tex, line 580::
sage: ZZ['x']
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 591::
sage: Z5 = GF(5); Z5
Finite Field of size 5
sage: P = Z5['x']; P
Univariate Polynomial Ring in x over Finite Field of size 5
sage: M = MatrixSpace(P, 3, 3); M
Full MatrixSpace of 3 by 3 dense matrices over
Univariate Polynomial Ring in x over Finite Field of size 5
Sage example in ./domaines.tex, line 602::
sage: M.random_element() # random
[2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x]
[ 3*x 2*x^2 + x + 3 3*x^2 + 4*x]
[ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4]
Sage example in ./domaines.tex, line 697::
sage: QQ.category()
Join of Category of number fields and Category of quotient fields and Category of metric spaces
Sage example in ./domaines.tex, line 704::
sage: QQ in Fields()
True
Sage example in ./domaines.tex, line 712::
sage: QQ in CommutativeAdditiveGroups()
True
Sage example in ./domaines.tex, line 718::
sage: QQ['x'] in EuclideanDomains()
True
Sage example in ./domaines.tex, line 859::
sage: 5.parent()
Integer Ring
Sage example in ./domaines.tex, line 872::
sage: type(factor(4))
<class 'sage.structure.factorization_integer.IntegerFactorization'>
Sage example in ./domaines.tex, line 895::
sage: int(5)
5
sage: type(int(5))
<... 'int'>
Sage example in ./domaines.tex, line 909::
sage: Integer(5)
5
sage: type(Integer(5))
<... 'sage.rings.integer.Integer'>
Sage example in ./domaines.tex, line 926::
sage: factorial(99) / factorial(100) - 1 / 50
-1/100
Sage example in ./domaines.tex, line 974::
sage: 72/53 - 5/3 * 2.7
-3.14150943396227
Sage example in ./domaines.tex, line 982::
sage: cos(1), cos(1.)
(cos(1), 0.540302305868140)
Sage example in ./domaines.tex, line 1000::
sage: pi.n(digits=50) # variant: n(pi,digits=50)
3.1415926535897932384626433832795028841971693993751
Sage example in ./domaines.tex, line 1020::
sage: z = CC(1,2); z.arg()
1.10714871779409
Sage example in ./domaines.tex, line 1036::
sage: I.parent()
Number Field in I with defining polynomial x^2 + 1 with I = 1*I
Sage example in ./domaines.tex, line 1043::
sage: (1.+2.*I).parent()
Complex Field with 53 bits of precision
sage: (1.+2.*SR(I)).parent()
Symbolic Ring
Sage example in ./domaines.tex, line 1064::
sage: z = 3 * exp(I*pi/4)
sage: z.real(), z.imag(), z.abs().canonicalize_radical()
(3/2*sqrt(2), 3/2*sqrt(2), 3)
Sage example in ./domaines.tex, line 1094::
sage: a, b, c = 0, 2, 3
sage: a == 1 or (b == 2 and c == 3)
True
Sage example in ./domaines.tex, line 1147::
sage: x, y = var('x, y')
sage: bool( (x-y)*(x+y) == x^2-y^2 )
True
Sage example in ./domaines.tex, line 1171::
sage: Z4 = IntegerModRing(4); Z4
Ring of integers modulo 4
sage: m = Z4(7); m
3
Sage example in ./domaines.tex, line 1184::
sage: 3 * m + 1
2
Sage example in ./domaines.tex, line 1191::
sage: Z3 = GF(3); Z3
Finite Field of size 3
Sage example in ./domaines.tex, line 1243::
sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1259::
sage: M = MatrixSpace(QQ,3,3); M
Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: a = M([[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1283::
sage: P = ZZ['x']; P
Univariate Polynomial Ring in x over Integer Ring
sage: F = P.fraction_field(); F
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: p = P(x+1) * P(x); p
x^2 + x
sage: p + 1/p
(x^4 + 2*x^3 + x^2 + 1)/(x^2 + x)
sage: parent(p + 1/p)
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1382::
sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a
-a - 1
-a^2 + 2*a
Sage example in ./domaines.tex, line 1416::
sage: parent(sin(x))
Symbolic Ring
Sage example in ./domaines.tex, line 1422::
sage: SR
Symbolic Ring
Sage example in ./domaines.tex, line 1428::
sage: SR.category()
Category of fields
Sage example in ./domaines.tex, line 1482::
sage: R = QQ['x1,x2,x3,x4']; R
Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field
sage: x1, x2, x3, x4 = R.gens()
Sage example in ./domaines.tex, line 1489::
sage: x1 * (x2 - x3)
x1*x2 - x1*x3
Sage example in ./domaines.tex, line 1496::
sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2)
0
Sage example in ./domaines.tex, line 1509::
sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc()
x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2
+ x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4
+ x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4
+ x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2
+ x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3
+ x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3
Sage example in ./domaines.tex, line 1531::
sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4')
sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) )
sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: bool(got == expected1 or got == expected2)
True
Sage example in ./domaines.tex, line 1581::
sage: x = var('x')
sage: p = 54*x^4+36*x^3-102*x^2-72*x-12
sage: factor(p)
6*(x^2 - 2)*(3*x + 1)^2
Sage example in ./domaines.tex, line 1616::
sage: R = ZZ['x']; R
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1622::
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
Sage example in ./domaines.tex, line 1629::
sage: parent(q)
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1635::
sage: factor(q)
2 * 3 * (3*x + 1)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1642::
sage: R = QQ['x']; R
Univariate Polynomial Ring in x over Rational Field
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x + 1/3)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1665::
sage: R = ComplexField(16)['x']; R
Univariate Polynomial Ring in x over Complex Field
with 16 bits of precision
sage: q = R(p); q
54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00
sage: factor(q)
(54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414)
Sage example in ./domaines.tex, line 1685::
sage: R = QQ[sqrt(2)]['x']; R
Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2
Sage example in ./domaines.tex, line 1698::
sage: R = GF(5)['x']; R
Univariate Polynomial Ring in x over Finite Field of size 5
sage: q = R(p); q
4*x^4 + x^3 + 3*x^2 + 3*x + 3
sage: factor(q)
(4) * (x + 2)^2 * (x^2 + 3)
"""
| <filename>src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py<gh_stars>1000+
## -*- encoding: utf-8 -*-
"""
This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./domaines_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./domaines.tex, line 10::
sage: x = var('x')
Sage example in ./domaines.tex, line 69::
sage: o = 12/35
sage: type(o)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 82::
sage: type(12/35)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 131::
sage: o = 720
sage: o.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 142::
sage: type(o).factor(o)
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 157::
sage: 720.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 166::
sage: o = 720 / 133
sage: o.numerator().factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 253::
sage: 3 * 7
21
Sage example in ./domaines.tex, line 261::
sage: (2/3) * (6/5)
4/5
Sage example in ./domaines.tex, line 267::
sage: (1 + I) * (1 - I)
2
Sage example in ./domaines.tex, line 274::
sage: (x + 2) * (x + 1)
(x + 2)*(x + 1)
sage: (x + 1) * (x + 2)
(x + 2)*(x + 1)
Sage example in ./domaines.tex, line 308::
sage: def fourth_power(a):
....: a = a * a
....: a = a * a
....: return a
Sage example in ./domaines.tex, line 330::
sage: fourth_power(2)
16
sage: fourth_power(3/2)
81/16
sage: fourth_power(I)
1
sage: fourth_power(x+1)
(x + 1)^4
sage: M = matrix([[0,-1],[1,0]]); M
[ 0 -1]
[ 1 0]
sage: fourth_power(M)
[1 0]
[0 1]
Sage example in ./domaines.tex, line 375::
sage: t = type(5/1); t
<... 'sage.rings.rational.Rational'>
sage: t == type(5)
False
Sage example in ./domaines.tex, line 476::
sage: a = 5; a
5
sage: a.is_unit()
False
Sage example in ./domaines.tex, line 484::
sage: a = 5/1; a
5
sage: a.is_unit()
True
Sage example in ./domaines.tex, line 507::
sage: parent(5)
Integer Ring
sage: parent(5/1)
Rational Field
Sage example in ./domaines.tex, line 515::
sage: ZZ
Integer Ring
sage: QQ
Rational Field
Sage example in ./domaines.tex, line 525::
sage: QQ(5).parent()
Rational Field
sage: ZZ(5/1).parent()
Integer Ring
sage: ZZ(1/5)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
Sage example in ./domaines.tex, line 543::
sage: ZZ(1), QQ(1), RR(1), CC(1)
(1, 1, 1.00000000000000, 1.00000000000000)
Sage example in ./domaines.tex, line 568::
sage: cartesian_product([QQ, QQ])
The Cartesian product of (Rational Field, Rational Field)
Sage example in ./domaines.tex, line 574::
sage: ZZ.fraction_field()
Rational Field
Sage example in ./domaines.tex, line 580::
sage: ZZ['x']
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 591::
sage: Z5 = GF(5); Z5
Finite Field of size 5
sage: P = Z5['x']; P
Univariate Polynomial Ring in x over Finite Field of size 5
sage: M = MatrixSpace(P, 3, 3); M
Full MatrixSpace of 3 by 3 dense matrices over
Univariate Polynomial Ring in x over Finite Field of size 5
Sage example in ./domaines.tex, line 602::
sage: M.random_element() # random
[2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x]
[ 3*x 2*x^2 + x + 3 3*x^2 + 4*x]
[ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4]
Sage example in ./domaines.tex, line 697::
sage: QQ.category()
Join of Category of number fields and Category of quotient fields and Category of metric spaces
Sage example in ./domaines.tex, line 704::
sage: QQ in Fields()
True
Sage example in ./domaines.tex, line 712::
sage: QQ in CommutativeAdditiveGroups()
True
Sage example in ./domaines.tex, line 718::
sage: QQ['x'] in EuclideanDomains()
True
Sage example in ./domaines.tex, line 859::
sage: 5.parent()
Integer Ring
Sage example in ./domaines.tex, line 872::
sage: type(factor(4))
<class 'sage.structure.factorization_integer.IntegerFactorization'>
Sage example in ./domaines.tex, line 895::
sage: int(5)
5
sage: type(int(5))
<... 'int'>
Sage example in ./domaines.tex, line 909::
sage: Integer(5)
5
sage: type(Integer(5))
<... 'sage.rings.integer.Integer'>
Sage example in ./domaines.tex, line 926::
sage: factorial(99) / factorial(100) - 1 / 50
-1/100
Sage example in ./domaines.tex, line 974::
sage: 72/53 - 5/3 * 2.7
-3.14150943396227
Sage example in ./domaines.tex, line 982::
sage: cos(1), cos(1.)
(cos(1), 0.540302305868140)
Sage example in ./domaines.tex, line 1000::
sage: pi.n(digits=50) # variant: n(pi,digits=50)
3.1415926535897932384626433832795028841971693993751
Sage example in ./domaines.tex, line 1020::
sage: z = CC(1,2); z.arg()
1.10714871779409
Sage example in ./domaines.tex, line 1036::
sage: I.parent()
Number Field in I with defining polynomial x^2 + 1 with I = 1*I
Sage example in ./domaines.tex, line 1043::
sage: (1.+2.*I).parent()
Complex Field with 53 bits of precision
sage: (1.+2.*SR(I)).parent()
Symbolic Ring
Sage example in ./domaines.tex, line 1064::
sage: z = 3 * exp(I*pi/4)
sage: z.real(), z.imag(), z.abs().canonicalize_radical()
(3/2*sqrt(2), 3/2*sqrt(2), 3)
Sage example in ./domaines.tex, line 1094::
sage: a, b, c = 0, 2, 3
sage: a == 1 or (b == 2 and c == 3)
True
Sage example in ./domaines.tex, line 1147::
sage: x, y = var('x, y')
sage: bool( (x-y)*(x+y) == x^2-y^2 )
True
Sage example in ./domaines.tex, line 1171::
sage: Z4 = IntegerModRing(4); Z4
Ring of integers modulo 4
sage: m = Z4(7); m
3
Sage example in ./domaines.tex, line 1184::
sage: 3 * m + 1
2
Sage example in ./domaines.tex, line 1191::
sage: Z3 = GF(3); Z3
Finite Field of size 3
Sage example in ./domaines.tex, line 1243::
sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1259::
sage: M = MatrixSpace(QQ,3,3); M
Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: a = M([[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1283::
sage: P = ZZ['x']; P
Univariate Polynomial Ring in x over Integer Ring
sage: F = P.fraction_field(); F
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: p = P(x+1) * P(x); p
x^2 + x
sage: p + 1/p
(x^4 + 2*x^3 + x^2 + 1)/(x^2 + x)
sage: parent(p + 1/p)
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1382::
sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a
-a - 1
-a^2 + 2*a
Sage example in ./domaines.tex, line 1416::
sage: parent(sin(x))
Symbolic Ring
Sage example in ./domaines.tex, line 1422::
sage: SR
Symbolic Ring
Sage example in ./domaines.tex, line 1428::
sage: SR.category()
Category of fields
Sage example in ./domaines.tex, line 1482::
sage: R = QQ['x1,x2,x3,x4']; R
Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field
sage: x1, x2, x3, x4 = R.gens()
Sage example in ./domaines.tex, line 1489::
sage: x1 * (x2 - x3)
x1*x2 - x1*x3
Sage example in ./domaines.tex, line 1496::
sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2)
0
Sage example in ./domaines.tex, line 1509::
sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc()
x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2
+ x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4
+ x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4
+ x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2
+ x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3
+ x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3
Sage example in ./domaines.tex, line 1531::
sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4')
sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) )
sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: bool(got == expected1 or got == expected2)
True
Sage example in ./domaines.tex, line 1581::
sage: x = var('x')
sage: p = 54*x^4+36*x^3-102*x^2-72*x-12
sage: factor(p)
6*(x^2 - 2)*(3*x + 1)^2
Sage example in ./domaines.tex, line 1616::
sage: R = ZZ['x']; R
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1622::
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
Sage example in ./domaines.tex, line 1629::
sage: parent(q)
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1635::
sage: factor(q)
2 * 3 * (3*x + 1)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1642::
sage: R = QQ['x']; R
Univariate Polynomial Ring in x over Rational Field
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x + 1/3)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1665::
sage: R = ComplexField(16)['x']; R
Univariate Polynomial Ring in x over Complex Field
with 16 bits of precision
sage: q = R(p); q
54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00
sage: factor(q)
(54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414)
Sage example in ./domaines.tex, line 1685::
sage: R = QQ[sqrt(2)]['x']; R
Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2
Sage example in ./domaines.tex, line 1698::
sage: R = GF(5)['x']; R
Univariate Polynomial Ring in x over Finite Field of size 5
sage: q = R(p); q
4*x^4 + x^3 + 3*x^2 + 3*x + 3
sage: factor(q)
(4) * (x + 2)^2 * (x^2 + 3)
"""
| en | 0.498723 | ## -*- encoding: utf-8 -*- This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex, with sagetex.sty version 2011/05/27 v2.3.1. It contains the contents of all the sageexample environments from this file. You should be able to doctest this file with: sage -t ./domaines_doctest.sage It is always safe to delete this file; it is not used in typesetting your document. Sage example in ./domaines.tex, line 10:: sage: x = var('x') Sage example in ./domaines.tex, line 69:: sage: o = 12/35 sage: type(o) <... 'sage.rings.rational.Rational'> Sage example in ./domaines.tex, line 82:: sage: type(12/35) <... 'sage.rings.rational.Rational'> Sage example in ./domaines.tex, line 131:: sage: o = 720 sage: o.factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 142:: sage: type(o).factor(o) 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 157:: sage: 720.factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 166:: sage: o = 720 / 133 sage: o.numerator().factor() 2^4 * 3^2 * 5 Sage example in ./domaines.tex, line 253:: sage: 3 * 7 21 Sage example in ./domaines.tex, line 261:: sage: (2/3) * (6/5) 4/5 Sage example in ./domaines.tex, line 267:: sage: (1 + I) * (1 - I) 2 Sage example in ./domaines.tex, line 274:: sage: (x + 2) * (x + 1) (x + 2)*(x + 1) sage: (x + 1) * (x + 2) (x + 2)*(x + 1) Sage example in ./domaines.tex, line 308:: sage: def fourth_power(a): ....: a = a * a ....: a = a * a ....: return a Sage example in ./domaines.tex, line 330:: sage: fourth_power(2) 16 sage: fourth_power(3/2) 81/16 sage: fourth_power(I) 1 sage: fourth_power(x+1) (x + 1)^4 sage: M = matrix([[0,-1],[1,0]]); M [ 0 -1] [ 1 0] sage: fourth_power(M) [1 0] [0 1] Sage example in ./domaines.tex, line 375:: sage: t = type(5/1); t <... 'sage.rings.rational.Rational'> sage: t == type(5) False Sage example in ./domaines.tex, line 476:: sage: a = 5; a 5 sage: a.is_unit() False Sage example in ./domaines.tex, line 484:: sage: a = 5/1; a 5 sage: a.is_unit() True Sage example in ./domaines.tex, line 507:: sage: parent(5) Integer Ring sage: parent(5/1) Rational Field Sage example in ./domaines.tex, line 515:: sage: ZZ Integer Ring sage: QQ Rational Field Sage example in ./domaines.tex, line 525:: sage: QQ(5).parent() Rational Field sage: ZZ(5/1).parent() Integer Ring sage: ZZ(1/5) Traceback (most recent call last): ... 
TypeError: no conversion of this rational to integer Sage example in ./domaines.tex, line 543:: sage: ZZ(1), QQ(1), RR(1), CC(1) (1, 1, 1.00000000000000, 1.00000000000000) Sage example in ./domaines.tex, line 568:: sage: cartesian_product([QQ, QQ]) The Cartesian product of (Rational Field, Rational Field) Sage example in ./domaines.tex, line 574:: sage: ZZ.fraction_field() Rational Field Sage example in ./domaines.tex, line 580:: sage: ZZ['x'] Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 591:: sage: Z5 = GF(5); Z5 Finite Field of size 5 sage: P = Z5['x']; P Univariate Polynomial Ring in x over Finite Field of size 5 sage: M = MatrixSpace(P, 3, 3); M Full MatrixSpace of 3 by 3 dense matrices over Univariate Polynomial Ring in x over Finite Field of size 5 Sage example in ./domaines.tex, line 602:: sage: M.random_element() # random [2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x] [ 3*x 2*x^2 + x + 3 3*x^2 + 4*x] [ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4] Sage example in ./domaines.tex, line 697:: sage: QQ.category() Join of Category of number fields and Category of quotient fields and Category of metric spaces Sage example in ./domaines.tex, line 704:: sage: QQ in Fields() True Sage example in ./domaines.tex, line 712:: sage: QQ in CommutativeAdditiveGroups() True Sage example in ./domaines.tex, line 718:: sage: QQ['x'] in EuclideanDomains() True Sage example in ./domaines.tex, line 859:: sage: 5.parent() Integer Ring Sage example in ./domaines.tex, line 872:: sage: type(factor(4)) <class 'sage.structure.factorization_integer.IntegerFactorization'> Sage example in ./domaines.tex, line 895:: sage: int(5) 5 sage: type(int(5)) <... 'int'> Sage example in ./domaines.tex, line 909:: sage: Integer(5) 5 sage: type(Integer(5)) <... 'sage.rings.integer.Integer'> Sage example in ./domaines.tex, line 926:: sage: factorial(99) / factorial(100) - 1 / 50 -1/100 Sage example in ./domaines.tex, line 974:: sage: 72/53 - 5/3 * 2.7 -3.14150943396227 Sage example in ./domaines.tex, line 982:: sage: cos(1), cos(1.) 
(cos(1), 0.540302305868140) Sage example in ./domaines.tex, line 1000:: sage: pi.n(digits=50) # variant: n(pi,digits=50) 3.1415926535897932384626433832795028841971693993751 Sage example in ./domaines.tex, line 1020:: sage: z = CC(1,2); z.arg() 1.10714871779409 Sage example in ./domaines.tex, line 1036:: sage: I.parent() Number Field in I with defining polynomial x^2 + 1 with I = 1*I Sage example in ./domaines.tex, line 1043:: sage: (1.+2.*I).parent() Complex Field with 53 bits of precision sage: (1.+2.*SR(I)).parent() Symbolic Ring Sage example in ./domaines.tex, line 1064:: sage: z = 3 * exp(I*pi/4) sage: z.real(), z.imag(), z.abs().canonicalize_radical() (3/2*sqrt(2), 3/2*sqrt(2), 3) Sage example in ./domaines.tex, line 1094:: sage: a, b, c = 0, 2, 3 sage: a == 1 or (b == 2 and c == 3) True Sage example in ./domaines.tex, line 1147:: sage: x, y = var('x, y') sage: bool( (x-y)*(x+y) == x^2-y^2 ) True Sage example in ./domaines.tex, line 1171:: sage: Z4 = IntegerModRing(4); Z4 Ring of integers modulo 4 sage: m = Z4(7); m 3 Sage example in ./domaines.tex, line 1184:: sage: 3 * m + 1 2 Sage example in ./domaines.tex, line 1191:: sage: Z3 = GF(3); Z3 Finite Field of size 3 Sage example in ./domaines.tex, line 1243:: sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]]) sage: (a^2 + 1) * a^(-1) [ -5 13/2 7/3] [ 7 1 25/3] [ 2 19/2 27] Sage example in ./domaines.tex, line 1259:: sage: M = MatrixSpace(QQ,3,3); M Full MatrixSpace of 3 by 3 dense matrices over Rational Field sage: a = M([[1,2,3],[2,4,8],[3,9,27]]) sage: (a^2 + 1) * a^(-1) [ -5 13/2 7/3] [ 7 1 25/3] [ 2 19/2 27] Sage example in ./domaines.tex, line 1283:: sage: P = ZZ['x']; P Univariate Polynomial Ring in x over Integer Ring sage: F = P.fraction_field(); F Fraction Field of Univariate Polynomial Ring in x over Integer Ring sage: p = P(x+1) * P(x); p x^2 + x sage: p + 1/p (x^4 + 2*x^3 + x^2 + 1)/(x^2 + x) sage: parent(p + 1/p) Fraction Field of Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1382:: sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a -a - 1 -a^2 + 2*a Sage example in ./domaines.tex, line 1416:: sage: parent(sin(x)) Symbolic Ring Sage example in ./domaines.tex, line 1422:: sage: SR Symbolic Ring Sage example in ./domaines.tex, line 1428:: sage: SR.category() Category of fields Sage example in ./domaines.tex, line 1482:: sage: R = QQ['x1,x2,x3,x4']; R Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field sage: x1, x2, x3, x4 = R.gens() Sage example in ./domaines.tex, line 1489:: sage: x1 * (x2 - x3) x1*x2 - x1*x3 Sage example in ./domaines.tex, line 1496:: sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2) 0 Sage example in ./domaines.tex, line 1509:: sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc() x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2 + x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4 + x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4 + x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2 + x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3 + x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3 Sage example in ./domaines.tex, line 1531:: sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4') sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ) sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4) sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4) sage: bool(got == expected1 or got == expected2) True Sage example in ./domaines.tex, line 1581:: sage: x 
= var('x') sage: p = 54*x^4+36*x^3-102*x^2-72*x-12 sage: factor(p) 6*(x^2 - 2)*(3*x + 1)^2 Sage example in ./domaines.tex, line 1616:: sage: R = ZZ['x']; R Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1622:: sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 Sage example in ./domaines.tex, line 1629:: sage: parent(q) Univariate Polynomial Ring in x over Integer Ring Sage example in ./domaines.tex, line 1635:: sage: factor(q) 2 * 3 * (3*x + 1)^2 * (x^2 - 2) Sage example in ./domaines.tex, line 1642:: sage: R = QQ['x']; R Univariate Polynomial Ring in x over Rational Field sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 sage: factor(q) (54) * (x + 1/3)^2 * (x^2 - 2) Sage example in ./domaines.tex, line 1665:: sage: R = ComplexField(16)['x']; R Univariate Polynomial Ring in x over Complex Field with 16 bits of precision sage: q = R(p); q 54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00 sage: factor(q) (54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414) Sage example in ./domaines.tex, line 1685:: sage: R = QQ[sqrt(2)]['x']; R Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095? sage: q = R(p); q 54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12 sage: factor(q) (54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2 Sage example in ./domaines.tex, line 1698:: sage: R = GF(5)['x']; R Univariate Polynomial Ring in x over Finite Field of size 5 sage: q = R(p); q 4*x^4 + x^3 + 3*x^2 + 3*x + 3 sage: factor(q) (4) * (x + 2)^2 * (x^2 + 3) | 1.823437 | 2 |
src/riotwatcher/riotwatcher.py | TheBoringBakery/Riot-Watcher | 2 | 9499 | from .Deserializer import Deserializer
from .RateLimiter import RateLimiter
from .Handlers import (
DeprecationHandler,
DeserializerAdapter,
DictionaryDeserializer,
RateLimiterAdapter,
ThrowOnErrorHandler,
TypeCorrectorHandler,
)
from .Handlers.RateLimit import BasicRateLimiter
from ._apis import BaseApi
from ._apis.riot import AccountApi
class RiotWatcher:
"""
RiotWatcher class is intended to be the main interaction point with the generic Riot APIs.
"""
def __init__(
self,
api_key: str,
timeout: int = None,
rate_limiter: RateLimiter = BasicRateLimiter(),
deserializer: Deserializer = DictionaryDeserializer(),
):
"""
Initialize a new instance of the RiotWatcher class.
:param string api_key: the API key to use for this instance
:param int timeout: Time to wait for a response before timing out a connection to
the Riot API
:param RateLimiter rate_limiter: Instance to be used for rate limiting.
This defaults to Handlers.RateLimit.BasicRateLimiter.
:param Deserializer deserializer: Instance to be used to deserialize responses
from the Riot Api. Default is Handlers.DictionaryDeserializer.
"""
if not api_key:
raise ValueError("api_key must be set!")
handler_chain = [
DeserializerAdapter(deserializer),
ThrowOnErrorHandler(),
TypeCorrectorHandler(),
RateLimiterAdapter(rate_limiter),
DeprecationHandler(),
]
self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)
self._account = AccountApi(self._base_api)
@property
def account(self) -> AccountApi:
"""
Interface to the Account Endpoint
:rtype: riot.AccountApi
"""
return self._account
| from .Deserializer import Deserializer
from .RateLimiter import RateLimiter
from .Handlers import (
DeprecationHandler,
DeserializerAdapter,
DictionaryDeserializer,
RateLimiterAdapter,
ThrowOnErrorHandler,
TypeCorrectorHandler,
)
from .Handlers.RateLimit import BasicRateLimiter
from ._apis import BaseApi
from ._apis.riot import AccountApi
class RiotWatcher:
"""
RiotWatcher class is intended to be the main interaction point with the generic Riot APIs.
"""
def __init__(
self,
api_key: str,
timeout: int = None,
rate_limiter: RateLimiter = BasicRateLimiter(),
deserializer: Deserializer = DictionaryDeserializer(),
):
"""
Initialize a new instance of the RiotWatcher class.
:param string api_key: the API key to use for this instance
:param int timeout: Time to wait for a response before timing out a connection to
the Riot API
:param RateLimiter rate_limiter: Instance to be used for rate limiting.
This defaults to Handlers.RateLimit.BasicRateLimiter.
:param Deserializer deserializer: Instance to be used to deserialize responses
from the Riot Api. Default is Handlers.DictionaryDeserializer.
"""
if not api_key:
raise ValueError("api_key must be set!")
handler_chain = [
DeserializerAdapter(deserializer),
ThrowOnErrorHandler(),
TypeCorrectorHandler(),
RateLimiterAdapter(rate_limiter),
DeprecationHandler(),
]
self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)
self._account = AccountApi(self._base_api)
@property
def account(self) -> AccountApi:
"""
Interface to the Account Endpoint
:rtype: riot.AccountApi
"""
return self._account
| en | 0.637523 | RiotWatcher class is intended to be the main interaction point with the generic Riot APIs. Initialize a new instance of the RiotWatcher class. :param string api_key: the API key to use for this instance :param int timeout: Time to wait for a response before timing out a connection to the Riot API :param RateLimiter rate_limiter: Instance to be used for rate limiting. This defaults to Handlers.RateLimit.BasicRateLimiter. :param Deserializer deserializer: Instance to be used to deserialize responses from the Riot Api. Default is Handlers.DictionaryDeserializer. Interface to the Account Endpoint :rtype: riot.AccountApi | 2.468872 | 2 |
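A construction sketch for the RiotWatcher row above, using only names visible in that module; the top-level import path and the placeholder API key are assumptions, and no AccountApi method names are invented here:

from riotwatcher import RiotWatcher  # assumed public export of the class above

# Passing a falsy key raises ValueError("api_key must be set!") per __init__.
watcher = RiotWatcher("RGAPI-placeholder-key", timeout=10)

# The .account property exposes the riot AccountApi wrapper; every call made
# through it goes through the handler chain assembled in __init__ (deserialization,
# error raising, type correction, rate limiting, deprecation warnings).
account_api = watcher.account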