ext | sha | content
---|---|---|
py | b40fa08c53ba56a98f3eaec25d2892e1c3f66bd4 | #!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Protocol implementation.
"""
from types import MappingProxyType
import hashlib
import json
import logging
from avro import schema
# ------------------------------------------------------------------------------
# Constants
# Allowed top-level schemas in a protocol:
VALID_TYPE_SCHEMA_TYPES = frozenset(['enum', 'record', 'error', 'fixed'])
# ------------------------------------------------------------------------------
# Exceptions
class ProtocolParseException(schema.AvroException):
"""Error while parsing a JSON protocol descriptor."""
pass
# ------------------------------------------------------------------------------
# Base Classes
class Protocol(object):
"""An application protocol."""
@staticmethod
def _ParseTypeDesc(type_desc, names):
type_schema = schema.SchemaFromJSONData(type_desc, names=names)
if type_schema.type not in VALID_TYPE_SCHEMA_TYPES:
raise ProtocolParseException(
'Invalid type %r: '
'protocols can only declare types %s.'
% (type_schema, ','.join(VALID_TYPE_SCHEMA_TYPES)))
return type_schema
@staticmethod
def _ParseMessageDesc(name, message_desc, names):
"""Parses a protocol message descriptor.
Args:
name: Name of the message.
message_desc: Descriptor of the message.
names: Tracker of the named Avro schema.
Returns:
The parsed protocol message.
Raises:
ProtocolParseException: if the descriptor is invalid.
"""
request_desc = message_desc.get('request')
if request_desc is None:
raise ProtocolParseException(
'Invalid message descriptor with no "request": %r.' % message_desc)
request_schema = Message._ParseRequestFromJSONDesc(
request_desc=request_desc,
names=names,
)
response_desc = message_desc.get('response')
if response_desc is None:
raise ProtocolParseException(
'Invalid message descriptor with no "response": %r.' % message_desc)
response_schema = Message._ParseResponseFromJSONDesc(
response_desc=response_desc,
names=names,
)
# Errors are optional:
errors_desc = message_desc.get('errors', tuple())
error_union_schema = Message._ParseErrorsFromJSONDesc(
errors_desc=errors_desc,
names=names,
)
return Message(
name=name,
request=request_schema,
response=response_schema,
errors=error_union_schema,
)
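# Illustrative shape of a message descriptor accepted by _ParseMessageDesc()
# (a minimal sketch; the field and type names are invented):
#
#   {
#     "request": [{"name": "greeting", "type": "string"}],
#     "response": "string",
#     "errors": ["Curse"]   # optional; defaults to an empty tuple
#   }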
@staticmethod
def _ParseMessageDescMap(message_desc_map, names):
for name, message_desc in message_desc_map.items():
yield Protocol._ParseMessageDesc(
name=name,
message_desc=message_desc,
names=names,
)
def __init__(
self,
name,
namespace=None,
types=tuple(),
messages=tuple(),
):
"""Initializes a new protocol object.
Args:
name: Protocol name (absolute or relative).
namespace: Optional explicit namespace (if name is relative).
types: Collection of types in the protocol.
messages: Collection of messages in the protocol.
"""
self._avro_name = schema.Name(name=name, namespace=namespace)
self._fullname = self._avro_name.fullname
self._name = self._avro_name.simple_name
self._namespace = self._avro_name.namespace
self._props = {}
self._props['name'] = self._name
if self._namespace:
self._props['namespace'] = self._namespace
self._names = schema.Names(default_namespace=self._namespace)
self._types = tuple(types)
# Map: type full name -> type schema
self._type_map = MappingProxyType({type.fullname: type for type in self._types})
# This assertion cannot fail unless we don't track named schemas properly:
assert (len(self._types) == len(self._type_map)), (
'Type list %r does not match type map: %r'
% (self._types, self._type_map))
# TODO: set props['types']
self._messages = tuple(messages)
# Map: message name -> Message
# Note that message names are simple names unique within the protocol.
self._message_map = MappingProxyType({message.name: message for message in self._messages})
if len(self._messages) != len(self._message_map):
raise ProtocolParseException(
'Invalid protocol %s with duplicate message name: %r'
% (self._avro_name, self._messages))
# TODO: set props['messages']
self._md5 = hashlib.md5(str(self).encode('utf-8')).digest()
@property
def name(self):
"""Returns: the simple name of the protocol."""
return self._name
@property
def namespace(self):
"""Returns: the namespace this protocol belongs to."""
return self._namespace
@property
def fullname(self):
"""Returns: the fully qualified name of this protocol."""
return self._fullname
@property
def types(self):
"""Returns: the collection of types declared in this protocol."""
return self._types
@property
def type_map(self):
"""Returns: the map of types in this protocol, indexed by their full name."""
return self._type_map
@property
def messages(self):
"""Returns: the collection of messages declared in this protocol."""
return self._messages
@property
def message_map(self):
"""Returns: the map of messages in this protocol, indexed by their name."""
return self._message_map
@property
def md5(self):
return self._md5
@property
def props(self):
return self._props
def to_json(self):
to_dump = {}
to_dump['protocol'] = self.name
names = schema.Names(default_namespace=self.namespace)
if self.namespace:
to_dump['namespace'] = self.namespace
if self.types:
to_dump['types'] = [ t.to_json(names) for t in self.types ]
if self.messages:
messages_dict = {}
for name, body in self.message_map.items():
messages_dict[name] = body.to_json(names)
to_dump['messages'] = messages_dict
return to_dump
def __str__(self):
return json.dumps(self.to_json(), cls=schema.MappingProxyEncoder)
def __eq__(self, that):
to_cmp = json.loads(str(self))
return to_cmp == json.loads(str(that))
# ------------------------------------------------------------------------------
class Message(object):
"""A Protocol message."""
@staticmethod
def _ParseRequestFromJSONDesc(request_desc, names):
"""Parses the request descriptor of a protocol message.
Args:
request_desc: Descriptor of the message request.
This is a list of fields that defines an unnamed record.
names: Tracker for named Avro schemas.
Returns:
The parsed request schema, as an unnamed record.
"""
fields = schema.RecordSchema._MakeFieldList(request_desc, names=names)
return schema.RecordSchema(
name=None,
namespace=None,
fields=fields,
names=names,
record_type=schema.REQUEST,
)
@staticmethod
def _ParseResponseFromJSONDesc(response_desc, names):
"""Parses the response descriptor of a protocol message.
Args:
response_desc: Descriptor of the message response.
This is an arbitrary Avro schema descriptor.
Returns:
The parsed response schema.
"""
return schema.SchemaFromJSONData(response_desc, names=names)
@staticmethod
def _ParseErrorsFromJSONDesc(errors_desc, names):
"""Parses the errors descriptor of a protocol message.
Args:
errors_desc: Descriptor of the errors thrown by the protocol message.
This is a list of error types understood as an implicit union.
Each error type is an arbitrary Avro schema.
names: Tracker for named Avro schemas.
Returns:
The parsed ErrorUnionSchema.
"""
error_union_desc = {
'type': schema.ERROR_UNION,
'declared_errors': errors_desc,
}
return schema.SchemaFromJSONData(error_union_desc, names=names)
def __init__(self, name, request, response, errors=None):
self._name = name
self._props = {}
# TODO: set properties
self._request = request
self._response = response
self._errors = errors
@property
def name(self):
return self._name
@property
def request(self):
return self._request
@property
def response(self):
return self._response
@property
def errors(self):
return self._errors
@property
def props(self):
return self._props
def __str__(self):
return json.dumps(self.to_json(), cls=schema.MappingProxyEncoder)
def to_json(self, names=None):
if names is None:
names = schema.Names()
to_dump = {}
to_dump['request'] = self.request.to_json(names)
to_dump['response'] = self.response.to_json(names)
if self.errors:
to_dump['errors'] = self.errors.to_json(names)
return to_dump
def __eq__(self, that):
return self.name == that.name and self.props == that.props
# ------------------------------------------------------------------------------
def ProtocolFromJSONData(json_data):
"""Builds an Avro Protocol from its JSON descriptor.
Args:
json_data: JSON data representing the descriptor of the Avro protocol.
Returns:
The Avro Protocol parsed from the JSON descriptor.
Raises:
ProtocolParseException: if the descriptor is invalid.
"""
if type(json_data) != dict:
raise ProtocolParseException(
'Invalid JSON descriptor for an Avro protocol: %r' % json_data)
name = json_data.get('protocol')
if name is None:
raise ProtocolParseException(
'Invalid protocol descriptor with no "name": %r' % json_data)
# Namespace is optional
namespace = json_data.get('namespace')
avro_name = schema.Name(name=name, namespace=namespace)
names = schema.Names(default_namespace=avro_name.namespace)
type_desc_list = json_data.get('types', tuple())
types = tuple(map(
lambda desc: Protocol._ParseTypeDesc(desc, names=names),
type_desc_list))
message_desc_map = json_data.get('messages', dict())
messages = tuple(Protocol._ParseMessageDescMap(message_desc_map, names=names))
return Protocol(
name=name,
namespace=namespace,
types=types,
messages=messages,
)
def Parse(json_string):
"""Constructs a Protocol from its JSON descriptor in text form.
Args:
json_string: String representation of the JSON descriptor of the protocol.
Returns:
The parsed protocol.
Raises:
ProtocolParseException: on JSON parsing error,
or if the JSON descriptor is invalid.
"""
try:
json_data = json.loads(json_string)
except Exception as exn:
raise ProtocolParseException(
'Error parsing protocol from JSON: %r. '
'Error message: %r.'
% (json_string, exn))
return ProtocolFromJSONData(json_data)
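# ------------------------------------------------------------------------------
# Illustrative usage of Parse() (a minimal sketch; the "Echo" protocol below is
# invented and only meant to show the expected descriptor layout):
#
#   _ECHO_PROTOCOL_JSON = """
#   {
#     "protocol": "Echo",
#     "namespace": "example",
#     "types": [
#       {"type": "record", "name": "Ping",
#        "fields": [{"name": "text", "type": "string"}]}
#     ],
#     "messages": {
#       "echo": {"request": [{"name": "ping", "type": "Ping"}],
#                "response": "Ping"}
#     }
#   }
#   """
#   echo_protocol = Parse(_ECHO_PROTOCOL_JSON)
#   print(echo_protocol.fullname)             # example.Echo
#   print(sorted(echo_protocol.message_map))  # ['echo']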
|
py | b40fa13336d83794af843538a18b37571670e482 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Jason Narad <[email protected]>
# Steven Bird <[email protected]> (modifications)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://w3.msi.vxu.se/~nivre/research/MaltXML.html).
Currently only reads the first tree in a file.
"""
# python2.5 compatibility
from __future__ import with_statement
from __future__ import print_function
from nltk.tree import Tree
from pprint import pformat
import re
#################################################################
# DependencyGraph Class
#################################################################
class DependencyGraph(object):
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(self, tree_str=None):
"""
We place a dummy 'top' node in the first position
in the nodelist, since the root node is often assigned '0'
as its head. This also means that the indexing of the nodelist
corresponds directly to the Malt-TAB format, which starts at 1.
"""
top = {'word':None, 'deps':[], 'rel': 'TOP', 'tag': 'TOP', 'address': 0}
self.nodelist = [top]
self.root = None
self.stream = None
if tree_str:
self._parse(tree_str)
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
node_index = len(self.nodelist) - 1
while(node_index >= 0):
node = self.nodelist[node_index]
if node['address'] == address:
self.nodelist.pop(node_index)
node_index -= 1
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodelist:
new_deps = []
for dep in node['deps']:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node['deps'] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
for node in self.nodelist:
if node['address'] == head_address and (mod_address not in node['deps']):
node['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects the graph: every node is given every other node as a
dependent, except that nodes labelled 'TOP' are never added as dependents.
"""
for node1 in self.nodelist:
for node2 in self.nodelist:
if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
node1['deps'].append(node2['address'])
# fix error and return
def get_by_address(self, node_address):
"""
Returns the node with the given address.
"""
for node in self.nodelist:
if node['address'] == node_address:
return node
print('THROW ERROR: address not found in -get_by_address-')
return -1
def contains_address(self, node_address):
"""
Returns true if the graph contains a node with the given node
address, false otherwise.
"""
for node in self.nodelist:
if node['address'] == node_address:
return True
return False
def __str__(self):
return pformat(self.nodelist)
def __repr__(self):
return "<DependencyGraph with %d nodes>" % len(self.nodelist)
@staticmethod
def load(file):
"""
:param file: a file in Malt-TAB format
"""
with open(file) as f:
return DependencyGraph(f.read())
@staticmethod
def _normalize(line):
"""
Deal with lines in which spaces are used rather than tabs.
"""
SPC = re.compile(' +')
return re.sub(SPC, '\t', line).strip()
def left_children(self, node_index):
"""
Returns the number of left children under the node specified
by the given address.
"""
children = self.nodelist[node_index]['deps']
index = self.nodelist[node_index]['address']
return sum(1 for c in children if c < index)
def right_children(self, node_index):
"""
Returns the number of right children under the node specified
by the given address.
"""
children = self.nodelist[node_index]['deps']
index = self.nodelist[node_index]['address']
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node['address']):
self.nodelist.append(node)
def _parse(self, input):
lines = [DependencyGraph._normalize(line) for line in input.split('\n') if line.strip()]
temp = []
for index, line in enumerate(lines):
# print line
try:
cells = line.split('\t')
nrCells = len(cells)
if nrCells == 3:
word, tag, head = cells
rel = ''
elif nrCells == 4:
word, tag, head, rel = cells
elif nrCells == 10:
_, word, _, _, tag, _, head, rel, _, _ = cells
else:
raise ValueError('Number of tab-delimited fields (%d) not supported by CoNLL(10) or Malt-Tab(4) format' % (nrCells))
head = int(head)
self.nodelist.append({'address': index+1, 'word': word, 'tag': tag,
'head': head, 'rel': rel,
'deps': [d for (d,h) in temp if h == index+1]})
try:
self.nodelist[head]['deps'].append(index+1)
except IndexError:
temp.append((index+1, head))
except ValueError:
break
root_address = self.nodelist[0]['deps'][0]
self.root = self.nodelist[root_address]
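# Illustrative input lines accepted by _parse() above (a minimal sketch with
# invented tokens); fields may be tab- or space-separated:
#
#   3 columns (Malt-TAB, no relation):  "John  NNP  2"
#   4 columns (Malt-TAB):               "John  NNP  2  SUB"
#   10 columns (CoNLL):                 "1  John  john  N  NNP  _  2  SUB  _  _"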
def _word(self, node, filter=True):
w = node['word']
if filter:
if w != ',': return w
return w
def _tree(self, i):
"""
Recursive function for turning dependency graphs into
NLTK trees.
:type i: int
:param i: index of a node in ``nodelist``
:return: either a word (if the indexed node
is a leaf) or a ``Tree``.
"""
node = self.get_by_address(i)
word = node['word']
deps = node['deps']
return (Tree(word, [self._tree(j) for j in deps]) if len(deps) != 0 else word)
def tree(self):
"""
Starting with the ``root`` node, build a dependency tree using the NLTK
``Tree`` constructor. Dependency labels are omitted.
"""
node = self.root
word = node['word']
deps = node['deps']
return Tree(word, [self._tree(i) for i in deps])
def _hd(self, i):
try:
return self.nodelist[i]['head']
except IndexError:
return None
def _rel(self, i):
try:
return self.nodelist[i]['rel']
except IndexError:
return None
# what's the return type? Boolean or list?
def contains_cycle(self):
distances = {}
for node in self.nodelist:
for dep in node['deps']:
key = tuple([node['address'], dep]) #'%d -> %d' % (node['address'], dep)
distances[key] = 1
for n in range(len(self.nodelist)):
new_entries = {}
for pair1 in distances:
for pair2 in distances:
if pair1[1] == pair2[0]:
key = tuple([pair1[0], pair2[1]])
new_entries[key] = distances[pair1] + distances[pair2]
for pair in new_entries:
distances[pair] = new_entries[pair]
if pair[0] == pair[1]:
print(pair[0])
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0]) #self.nodelist[pair[0]], pair[0])
return path
return False # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node['deps']:
if dep == goal_node_index:
return [curr_node['address']]
for dep in curr_node['deps']:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)#self.nodelist[dep], goal_node_index)
if len(path) > 0:
path.insert(0, curr_node['address'])
return path
return []
def to_conll(self, style):
"""
The dependency graph in CoNLL format.
:param style: the style to use for the format (3, 4, 10 columns)
:type style: int
:rtype: str
"""
lines = []
for i, node in enumerate(self.nodelist[1:]):
word, tag, head, rel = node['word'], node['tag'], node['head'], node['rel']
if style == 3:
lines.append('%s\t%s\t%s\n' % (word, tag, head))
elif style == 4:
lines.append('%s\t%s\t%s\t%s\n' % (word, tag, head, rel))
elif style == 10:
lines.append('%s\t%s\t_\t%s\t%s\t_\t%s\t%s\t_\t_\n' % (i+1, word, tag, tag, head, rel))
else:
raise ValueError('Number of tab-delimited fields (%d) not supported by CoNLL(10) or Malt-Tab(4) format' % (style))
return ''.join(lines)
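# Illustrative to_conll() output for a single node (a minimal sketch with
# invented tokens); one line is emitted per non-root node:
#
#   style=3:  "John\tNNP\t2\n"
#   style=4:  "John\tNNP\t2\tSUB\n"
#   style=10: "1\tJohn\t_\tNNP\tNNP\t_\t2\tSUB\t_\t_\n"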
def nx_graph(self):
"""
Convert the data in a ``nodelist`` into a networkx
labeled directed graph.
:rtype: XDigraph
"""
import networkx as NX  # not imported at module level; needed for XDiGraph below
nx_nodelist = range(1, len(self.nodelist))
nx_edgelist = [(n, self._hd(n), self._rel(n))
for n in nx_nodelist if self._hd(n)]
self.nx_labels = {}
for n in nx_nodelist:
self.nx_labels[n] = self.nodelist[n]['word']
g = NX.XDiGraph()
g.add_nodes_from(nx_nodelist)
g.add_edges_from(nx_edgelist)
return g
def demo():
malt_demo()
conll_demo()
conll_file_demo()
cycle_finding_demo()
def malt_demo(nx=False):
"""
A demonstration of the result of reading a dependency
version of the first sentence of the Penn Treebank.
"""
dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
tree = dg.tree()
print(tree.pprint())
if nx:
#currently doesn't work
import networkx as NX
import pylab as P
g = dg.nx_graph()
g.info()
pos = NX.spring_layout(g, dim=1)
NX.draw_networkx_nodes(g, pos, node_size=50)
#NX.draw_networkx_edges(g, pos, edge_color='k', width=8)
NX.draw_networkx_labels(g, pos, dg.nx_labels)
P.xticks([])
P.yticks([])
P.savefig('tree.png')
P.show()
def conll_demo():
"""
A demonstration of how to read a string representation of
a CoNLL format dependency tree.
"""
dg = DependencyGraph(conll_data1)
tree = dg.tree()
print(tree.pprint())
print(dg)
print(dg.to_conll(4))
def conll_file_demo():
print('Mass conll_read demo...')
graphs = [DependencyGraph(entry)
for entry in conll_data2.split('\n\n') if entry]
for graph in graphs:
tree = graph.tree()
print('\n' + tree.pprint())
def cycle_finding_demo():
dg = DependencyGraph(treebank_data)
print(dg.contains_cycle())
cyclic_dg = DependencyGraph()
top = {'word':None, 'deps':[1], 'rel': 'TOP', 'address': 0}
child1 = {'word':None, 'deps':[2], 'rel': 'NTOP', 'address': 1}
child2 = {'word':None, 'deps':[4], 'rel': 'NTOP', 'address': 2}
child3 = {'word':None, 'deps':[1], 'rel': 'NTOP', 'address': 3}
child4 = {'word':None, 'deps':[3], 'rel': 'NTOP', 'address': 4}
cyclic_dg.nodelist = [top, child1, child2, child3, child4]
cyclic_dg.root = top
print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
demo()
|
py | b40fa18924086934c82e3b5631d8f920ab8e2758 | # -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import json
from django.core import serializers
from django.db.models import F
from django.test import SimpleTestCase, TestCase
from testapp.models import Bit1Model, NullBit1Model
class TestSaveLoad(TestCase):
def test_basic(self):
m = Bit1Model()
m.flag_a = False
m.flag_b = True
m.save()
m = Bit1Model.objects.get(id=m.id)
assert not m.flag_a
assert m.flag_b
m.save()
m = Bit1Model.objects.get(id=m.id)
assert not m.flag_a
assert m.flag_b
m.flag_a = True
m.flag_b = False
m.save()
m = Bit1Model.objects.get(id=m.id)
assert m.flag_a
assert not m.flag_b
def test_defaults(self):
m = Bit1Model.objects.create()
assert m.flag_a
assert not m.flag_b
m = Bit1Model.objects.get(id=m.id)
assert m.flag_a
assert not m.flag_b
def test_filter(self):
m = Bit1Model.objects.create(flag_a=True, flag_b=True)
assert list(Bit1Model.objects.filter(flag_a=True)) == [m]
assert list(Bit1Model.objects.filter(flag_a=False)) == []
assert list(Bit1Model.objects.filter(flag_a=F('flag_b'))) == [m]
assert list(Bit1Model.objects.exclude(flag_a=F('flag_b'))) == []
m.flag_a = False
m.save()
assert list(Bit1Model.objects.filter(flag_a=True)) == []
assert list(Bit1Model.objects.filter(flag_a=False)) == [m]
assert list(Bit1Model.objects.filter(flag_a=F('flag_b'))) == []
assert list(Bit1Model.objects.exclude(flag_a=F('flag_b'))) == [m]
Bit1Model.objects.filter(flag_a=False).update(flag_a=True)
assert list(Bit1Model.objects.filter(flag_a=True)) == [m]
assert list(Bit1Model.objects.filter(flag_a=False)) == []
Bit1Model.objects.filter(flag_a=True).update(flag_a=False)
assert list(Bit1Model.objects.filter(flag_a=True)) == []
assert list(Bit1Model.objects.filter(flag_a=False)) == [m]
class TestSerialization(SimpleTestCase):
def test_dumping(self):
instance = Bit1Model(flag_a=True, flag_b=False)
data = json.loads(serializers.serialize('json', [instance]))[0]
fields = data['fields']
assert fields['flag_a']
assert not fields['flag_b']
def test_loading(self):
test_data = '''
[{"fields": {"flag_a": false, "flag_b": true},
"model": "testapp.Bit1Model", "pk": null}]
'''
objs = list(serializers.deserialize('json', test_data))
assert len(objs) == 1
instance = objs[0].object
assert not instance.flag_a
assert instance.flag_b
class TestNullSaveLoad(TestCase):
def test_basic(self):
m = NullBit1Model()
assert m.flag is None
m.save()
m = NullBit1Model.objects.get(id=m.id)
print(m.flag)
print(type(m.flag))
assert m.flag is None
m.flag = True
m.save()
m = NullBit1Model.objects.get(id=m.id)
assert m.flag
m.flag = False
m.save()
m = NullBit1Model.objects.get(id=m.id)
assert m.flag is not None and not m.flag
def test_defaults(self):
m = NullBit1Model.objects.create()
assert m.flag is None
m = NullBit1Model.objects.get(id=m.id)
assert m.flag is None
def test_filter(self):
m = NullBit1Model.objects.create()
assert list(NullBit1Model.objects.filter(flag=None)) == [m]
assert list(NullBit1Model.objects.filter(flag=True)) == []
assert list(NullBit1Model.objects.filter(flag=False)) == []
m.flag = True
m.save()
assert list(NullBit1Model.objects.filter(flag=None)) == []
assert list(NullBit1Model.objects.filter(flag=True)) == [m]
assert list(NullBit1Model.objects.filter(flag=False)) == []
m.flag = False
m.save()
assert list(NullBit1Model.objects.filter(flag=None)) == []
assert list(NullBit1Model.objects.filter(flag=True)) == []
assert list(NullBit1Model.objects.filter(flag=False)) == [m]
NullBit1Model.objects.filter(flag=False).update(flag=None)
assert list(NullBit1Model.objects.filter(flag=None)) == [m]
assert list(NullBit1Model.objects.filter(flag=True)) == []
assert list(NullBit1Model.objects.filter(flag=False)) == []
NullBit1Model.objects.filter(flag=None).update(flag=True)
assert list(NullBit1Model.objects.filter(flag=None)) == []
assert list(NullBit1Model.objects.filter(flag=True)) == [m]
assert list(NullBit1Model.objects.filter(flag=False)) == []
NullBit1Model.objects.filter(flag=True).update(flag=False)
assert list(NullBit1Model.objects.filter(flag=None)) == []
assert list(NullBit1Model.objects.filter(flag=True)) == []
assert list(NullBit1Model.objects.filter(flag=False)) == [m]
class TestNullSerialization(SimpleTestCase):
def test_dumping(self):
instance = NullBit1Model(flag=None)
data = json.loads(serializers.serialize('json', [instance]))[0]
fields = data['fields']
assert fields['flag'] is None
def test_loading(self):
test_data = '''
[{"fields": {"flag": null},
"model": "testapp.NullBit1Model", "pk": null}]
'''
objs = list(serializers.deserialize('json', test_data))
assert len(objs) == 1
instance = objs[0].object
assert instance.flag is None
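# For reference, a minimal sketch of the testapp models these tests appear to
# assume. The field classes shown are assumptions inferred from the assertions
# above (BIT(1)-backed boolean fields), not the actual testapp code:
#
#   from django.db import models
#
#   class Bit1Model(models.Model):
#       flag_a = Bit1BooleanField(default=True)    # hypothetical field class
#       flag_b = Bit1BooleanField(default=False)   # hypothetical field class
#
#   class NullBit1Model(models.Model):
#       flag = NullBit1BooleanField(default=None)  # hypothetical field class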
|
py | b40fa339708af61b3227d4b339c32b75a175925d | from assets.art import *
from printer.coloured import *
from printer.greyscale import *
#display message and logo
print(logo)
print("You are welcome")
print('+++++++++++++++++++++++++++++++++++++++++++++\n')
def Print_document():
"""funtion to collect users printing format and process the printing"""
print_format = input("What format would you like? ( coloured or greyscale ): ")
while True:
try:
if print_format == "coloured":
pages_number = int(input('Enter Number of Pages: '))
print_doc = coloured(pages_number)
print(print_doc.check_ink())
return print_again()
elif print_format == "greyscale":
pages_number = int(input('Enter Number of Pages: '))
print_doc = greyscale(pages_number)
print(print_doc.check_ink())
return print_again()
elif print_format == "report":
report_doc = Printer()
print(report_doc.report())
return print_again()
elif print_format == "off":
print('you have turned off the printer successfully')
quit()
else:
print("Invalid format: type ('coloured' or 'greyscale')")
return Print_document()
except ValueError:
return 'invalid input: restart the printer and enter a whole number for the page count'
def print_again():
"""function exits or restarts the program, with yes or no response"""
print('thank you for using our service')
something_else = input('would you like to do something else (yes or no): ')
if something_else == 'yes':
return Print_document()
else:
exit()
#main entry point to the application
if __name__ == '__main__':
print(Print_document())
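#for reference: a minimal sketch of what this script assumes the imported
#printer modules provide (names inferred from the calls above; the classes and
#method bodies here are hypothetical, not the real printer package):
#
#   class Printer:
#       def report(self): ...          # returns a usage/ink report string
#
#   class coloured(Printer):           # printer.coloured
#       def __init__(self, pages): ...
#       def check_ink(self): ...       # returns a status message for the job
#
#   class greyscale(Printer):          # printer.greyscale
#       def __init__(self, pages): ...
#       def check_ink(self): ...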
|
py | b40fa397d5ef419cfb3ea0b854b8cc17a9eea6b2 | #coding:utf-8
import os
import string
import sys
import time
import re
import StringIO
import tempfile
import threading
import traceback
import select
from datetime import datetime
from email.parser import Parser
from email.message import Message
from threading import Thread
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.crypto.state import *
from mailpile.crypto.mime import MimeSigningWrapper, MimeEncryptingWrapper
from mailpile.safe_popen import Popen, PIPE, Safe_Pipe
DEFAULT_SERVER = "hkp://subset.pool.sks-keyservers.net"
GPG_KEYID_LENGTH = 8
GNUPG_HOMEDIR = None # None=use what gpg uses
GPG_BINARY = 'gpg'
if sys.platform.startswith('win'):
GPG_BINARY = 'GnuPG\\gpg.exe'
BLOCKSIZE = 65536
openpgp_trust = {"-": _("Trust not calculated"),
"o": _("Unknown trust"),
"q": _("Undefined trust"),
"n": _("Never trust"),
"m": _("Marginally trust"),
"f": _("Full trust"),
"u": _("Ultimate trust"),
"e": _("Expired key, not trusted"),
"d": _("Disabled key, not trusted"), # Deprecated flag.
"r": _("Revoked key, not trusted")}
openpgp_algorithms = {1: _("RSA"),
2: _("RSA (encrypt only)"),
3: _("RSA (sign only)"),
16: _("Elgamal (encrypt only)"),
17: _("DSA"),
20: _("Elgamal (encrypt/sign) [COMPROMISED]")}
# For details on the type 20 compromise, see
# http://lists.gnupg.org/pipermail/gnupg-announce/2003q4/000160.html
class GnuPGResultParser:
"""
Parse the GPG response into EncryptionInfo and SignatureInfo.
"""
def __init__(rp):
rp.signature_info = SignatureInfo()
rp.signature_info["protocol"] = "openpgp"
rp.encryption_info = EncryptionInfo()
rp.encryption_info["protocol"] = "openpgp"
rp.plaintext = ""
def parse(rp, retvals):
signature_info = rp.signature_info
encryption_info = rp.encryption_info
from mailpile.mailutils import ExtractEmailAndName
# First pass, set some initial state.
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "DECRYPTION_FAILED":
missing = [x[1] for x in retvals[1]["status"]
if x[0] == "NO_SECKEY"]
if missing:
encryption_info.part_status = "missingkey"
encryption_info["missing_keys"] = missing
else:
encryption_info.part_status = "error"
elif keyword == "DECRYPTION_OKAY":
encryption_info.part_status = "decrypted"
rp.plaintext = "".join(retvals[1]["stdout"])
elif keyword == "ENC_TO":
keylist = encryption_info.get("have_keys", [])
if data[1] not in keylist:
keylist.append(data[1])
encryption_info["have_keys"] = keylist
elif signature_info.part_status == "none":
# Only one of these will ever be emitted per key, use
# this to set initial state. We may end up revising
# the status depending on more info later.
if keyword in ("GOODSIG", "BADSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "GOODSIG")
and "unverified"
or "invalid")
elif keyword == "ERRSIG":
signature_info.part_status = "error"
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[5])
# Second pass, this may update/mutate the state set above
for data in retvals[1]["status"]:
keyword = data[0].strip() # The last keyword often ends in \n
if keyword == "NO_SECKEY":
if "missing_keys" not in encryption_info:
encryption_info["missing_keys"] = [data[1]]
else:
encryption_info["missing_keys"].append(data[1])
try:
encryption_info["have_keys"].remove(data[1])
except (KeyError, ValueError):
pass
elif keyword == "VALIDSIG":
# FIXME: Determine trust level, between new, unverified,
# verified, untrusted.
signature_info["keyinfo"] = data[1]
signature_info["timestamp"] = int(data[3])
elif keyword in ("EXPKEYSIG", "REVKEYSIG"):
email, fn = ExtractEmailAndName(
" ".join(data[2:]).decode('utf-8'))
signature_info["name"] = fn
signature_info["email"] = email
signature_info.part_status = ((keyword == "EXPKEYSIG")
and "expired"
or "revoked")
# FIXME: This appears to be spammy. Is my key borked, or
# is GnuPG being stupid?
#
# elif keyword == "KEYEXPIRED": # Ignoring: SIGEXPIRED
# signature_info.part_status = "expired"
elif keyword == "KEYREVOKED":
signature_info.part_status = "revoked"
elif keyword == "NO_PUBKEY":
signature_info.part_status = "unknown"
elif keyword in ("TRUST_ULTIMATE", "TRUST_FULLY"):
if signature_info.part_status == "unverified":
signature_info.part_status = "verified"
return rp
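# Illustrative shape of the `retvals` argument consumed by parse() above
# (a minimal sketch; it mirrors the (exit_code, output_buffers) pair that
# GnuPG.run(), defined further down, returns):
#
#   retvals = (0, {
#       "stdout": ["Hello, World\n"],
#       "stderr": [],
#       "status": [["DECRYPTION_OKAY"], ["GOODSIG", "<keyid>", "<uid>"]],
#   })
#   rp = GnuPGResultParser().parse(retvals)
#   # rp.encryption_info, rp.signature_info and rp.plaintext are now populated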
class GnuPGRecordParser:
def __init__(self):
self.keys = {}
self.curkey = None
self.record_fields = ["record", "validity", "keysize", "keytype",
"keyid", "creation_date", "expiration_date",
"uidhash", "ownertrust", "uid", "sigclass",
"capabilities", "flag", "sn", "hashtype",
"curve"]
self.record_types = ["pub", "sub", "ssb", "fpr", "uat", "sec", "tru",
"sig", "rev", "uid", "gpg", "rvk"]
self.record_parsers = [self.parse_pubkey, self.parse_subkey,
self.parse_subkey, self.parse_fingerprint,
self.parse_userattribute, self.parse_privkey,
self.parse_trust, self.parse_signature,
self.parse_revoke, self.parse_uidline,
self.parse_none, self.parse_revocation_key]
self.dispatch = dict(zip(self.record_types, self.record_parsers))
def parse(self, lines):
for line in lines:
self.parse_line(line)
return self.keys
def parse_line(self, line):
line = dict(zip(self.record_fields,
map(lambda s: s.replace("\\x3a", ":"),
line.strip().split(":"))))
r = self.dispatch.get(line["record"], self.parse_unknown)
r(line)
def parse_pubkey(self, line):
self.curkey = line["keyid"]
line["keytype_name"] = openpgp_algorithms[int(line["keytype"])]
line["capabilities_map"] = {
"encrypt": "E" in line["capabilities"],
"sign": "S" in line["capabilities"],
"certify": "C" in line["capabilities"],
"authenticate": "A" in line["capabilities"],
}
line["disabled"] = "D" in line["capabilities"]
line["revoked"] = "r" in line["validity"]
line["private_key"] = False
line["subkeys"] = []
line["uids"] = []
for ts in ('expiration_date', 'creation_date'):
if line.get(ts) and '-' not in line[ts]:
try:
unixtime = int(line[ts])
if unixtime > 946684800: # 2000-01-01
dt = datetime.fromtimestamp(unixtime)
line[ts] = dt.strftime('%Y-%m-%d')
except ValueError:
line[ts+'_unparsed'] = line[ts]
line[ts] = '1970-01-01'
if line["record"] == "sec":
line["secret"] = True
self.keys[self.curkey] = line
self.parse_uidline(line)
def parse_subkey(self, line):
subkey = {"id": line["keyid"],
"keysize": line["keysize"],
"creation_date": line["creation_date"],
"keytype_name": openpgp_algorithms[int(line["keytype"])]}
self.keys[self.curkey]["subkeys"].append(subkey)
def parse_fingerprint(self, line):
self.keys[self.curkey]["fingerprint"] = line["uid"]
self.keys[line["uid"]] = self.keys[self.curkey]
del(self.keys[self.curkey])
self.curkey = line["uid"]
def parse_userattribute(self, line):
# TODO: We are currently ignoring user attributes as not useful.
# We may at some point want to use --attribute-fd and read
# in user photos and such?
pass
def parse_privkey(self, line):
self.parse_pubkey(line)
def parse_uidline(self, line):
email, name, comment = parse_uid(line["uid"])
if email or name or comment:
self.keys[self.curkey]["uids"].append({
"email": email,
"name": name,
"comment": comment,
"creation_date": line["creation_date"]
})
else:
pass # This is the case where a uid or sec line have no
# information aside from the creation date, which we
# parse elsewhere. As these lines are effectively blank,
# we omit them to simplify presentation to the user.
def parse_trust(self, line):
# TODO: We are currently ignoring commentary from the Trust DB.
pass
def parse_signature(self, line):
if "signatures" not in self.keys[self.curkey]:
self.keys[self.curkey]["signatures"] = []
sig = {
"signer": line[9],
"signature_date": line[5],
"keyid": line[4],
"trust": line[10],
"keytype": line[4]
}
self.keys[self.curkey]["signatures"].append(sig)
def parse_revoke(self, line):
pass # FIXME
def parse_revocation_key(self, line):
pass # FIXME
def parse_unknown(self, line):
print "Unknown line with code '%s'" % (line,)
def parse_none(self, line):
pass
UID_PARSE_RE = "^([^\(\<]+?){0,1}( \((.+?)\)){0,1}( \<(.+?)\>){0,1}\s*$"
def parse_uid(uidstr):
matches = re.match(UID_PARSE_RE, uidstr)
if matches:
email = matches.groups(0)[4] or ""
comment = matches.groups(0)[2] or ""
name = matches.groups(0)[0] or ""
else:
if '@' in uidstr and ' ' not in uidstr:
email, name = uidstr, ""
else:
email, name = "", uidstr
comment = ""
try:
name = name.decode("utf-8")
except UnicodeDecodeError:
try:
name = name.decode("iso-8859-1")
except UnicodeDecodeError:
name = name.decode("utf-8", "replace")
try:
comment = comment.decode("utf-8")
except UnicodeDecodeError:
try:
comment = comment.decode("iso-8859-1")
except UnicodeDecodeError:
comment = comment.decode("utf-8", "replace")
return email, name, comment
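# Illustrative parse_uid() results (a minimal sketch; addresses are invented):
#
#   parse_uid("Alice Example (work) <alice@example.com>")
#       -> ("alice@example.com", "Alice Example", "work")
#   parse_uid("Dave <dave@example.com>")
#       -> ("dave@example.com", "Dave", "")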
class StreamReader(Thread):
def __init__(self, name, fd, callback, lines=True):
Thread.__init__(self, target=self.readin, args=(fd, callback))
self.name = name
self.state = 'startup'
self.lines = lines
self.start()
def __str__(self):
return '%s(%s/%s, lines=%s)' % (Thread.__str__(self),
self.name, self.state, self.lines)
def readin(self, fd, callback):
try:
if self.lines:
self.state = 'read'
for line in iter(fd.readline, b''):
self.state = 'callback'
callback(line)
self.state = 'read'
else:
while True:
self.state = 'read'
buf = fd.read(BLOCKSIZE)
self.state = 'callback'
callback(buf)
if buf == "":
break
except:
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
class StreamWriter(Thread):
def __init__(self, name, fd, output, partial_write_ok=False):
Thread.__init__(self, target=self.writeout, args=(fd, output))
self.name = name
self.state = 'startup'
self.partial_write_ok = partial_write_ok
self.start()
def __str__(self):
return '%s(%s/%s)' % (Thread.__str__(self), self.name, self.state)
def writeout(self, fd, output):
if isinstance(output, (str, unicode)):
total = len(output)
output = StringIO.StringIO(output)
else:
total = 0
try:
while True:
self.state = 'read'
line = output.read(BLOCKSIZE)
if line == "":
break
self.state = 'write'
fd.write(line)
total -= len(line)
output.close()
except:
if not self.partial_write_ok:
print '%s: %s bytes left' % (self, total)
traceback.print_exc()
finally:
self.state = 'done'
fd.close()
DEBUG_GNUPG = False
class GnuPG:
"""
Wrap GnuPG and make all functionality feel Pythonic.
"""
ARMOR_BEGIN_SIGNED = '-----BEGIN PGP SIGNED MESSAGE-----'
ARMOR_BEGIN_SIGNATURE = '-----BEGIN PGP SIGNATURE-----'
ARMOR_END_SIGNATURE = '-----END PGP SIGNATURE-----'
ARMOR_END_SIGNED = '-----END PGP SIGNATURE-----'
ARMOR_BEGIN_ENCRYPTED = '-----BEGIN PGP MESSAGE-----'
ARMOR_END_ENCRYPTED = '-----END PGP MESSAGE-----'
def __init__(self, config, session=None, use_agent=False, debug=False):
global DEBUG_GNUPG
self.available = None
self.gpgbinary = GPG_BINARY
self.outputfds = ["stdout", "stderr", "status"]
self.errors = []
self.session = session
self.config = config or (session and session.config) or None
self.use_agent = use_agent
if self.config:
self.homedir = self.config.sys.gpg_home or GNUPG_HOMEDIR
DEBUG_GNUPG = ('gnupg' in self.config.sys.debug)
self.passphrase = self.config.gnupg_passphrase.get_reader()
else:
self.passphrase = None
self.homedir = GNUPG_HOMEDIR
self.debug = (self._debug_all if (debug or DEBUG_GNUPG)
else self._debug_none)
def _debug_all(self, msg):
if self.session:
self.session.debug(msg.rstrip())
else:
print '%s' % str(msg).rstrip()
def _debug_none(self, msg):
pass
def set_home(self, path):
self.homedir = path
def version(self):
retvals = self.run(["--version"])
return retvals[1]["stdout"][0].split('\n')[0]
def is_available(self):
try:
retvals = self.run(["--version"])
self.available = True
except OSError:
self.available = False
return self.available
def run(self,
args=None, gpg_input=None, outputfd=None, partial_read_ok=False,
send_passphrase=False, _raise=None):
self.outputbuffers = dict([(x, []) for x in self.outputfds])
self.threads = {}
args = args[:] if args else []
wtf = ' '.join(args)
args.insert(0, self.gpgbinary)
args.insert(1, "--utf8-strings")
args.insert(1, "--with-colons")
args.insert(1, "--verbose")
args.insert(1, "--batch")
args.insert(1, "--enable-progress-filter")
if not self.use_agent:
args.insert(1, "--no-use-agent")
if self.homedir:
args.insert(1, "--homedir=%s" % self.homedir)
gpg_retcode = -1
proc = None
try:
args.insert(1, "--status-fd=2")
if self.passphrase and send_passphrase:
if self.use_agent:
args.insert(1, "--no-use-agent")
args.insert(2, "--passphrase-fd=0")
if not self.passphrase and send_passphrase:
self.debug('Running WITHOUT PASSPHRASE %s' % ' '.join(args))
self.debug(traceback.format_stack())
else:
self.debug('Running %s' % ' '.join(args))
# Here we go!
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=0)
# GnuPG is a bit crazy, and requires that the passphrase
# be sent and the filehandle closed before anything else
# interesting happens.
if self.passphrase and send_passphrase:
c = self.passphrase.read(BLOCKSIZE)
while c != '':
proc.stdin.write(c)
c = self.passphrase.read(BLOCKSIZE)
proc.stdin.write('\n')
self.threads = {
"stderr": StreamReader('gpgi-stderr(%s)' % wtf,
proc.stderr, self.parse_stderr)
}
if outputfd:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-to-fd(%s)' % wtf,
proc.stdout, outputfd.write, lines=False)
else:
self.threads["stdout"] = StreamReader(
'gpgi-stdout-parsed(%s)' % wtf,
proc.stdout, self.parse_stdout)
if gpg_input:
# If we have input, we just stream it to GnuPG's stdin. Technically,
# this doesn't really need to be a thread at the moment.
self.debug('<<STDOUT<< %s' % gpg_input)
StreamWriter('gpgi-output(%s)' % wtf,
proc.stdin, gpg_input,
partial_write_ok=partial_read_ok).join()
else:
proc.stdin.close()
# Reap GnuPG
gpg_retcode = proc.wait()
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
# Reap the threads
self._reap_threads()
if outputfd:
outputfd.close()
if gpg_retcode != 0 and _raise:
raise _raise('GnuPG failed, exit code: %s' % gpg_retcode)
return gpg_retcode, self.outputbuffers
def _reap_threads(self):
for name, thr in self.threads.iteritems():
if thr.isAlive():
thr.join(timeout=15)
if thr.isAlive():
print 'SCARY WARNING: FAILED TO REAP THREAD %s' % thr
def parse_status(self, line, *args):
self.debug('<<STATUS<< %s' % line)
line = line.replace("[GNUPG:] ", "")
if line == "":
return
elems = line.split(" ")
self.outputbuffers["status"].append(elems)
def parse_stdout(self, line):
self.debug('<<STDOUT<< %s' % line)
self.outputbuffers["stdout"].append(line)
def parse_stderr(self, line):
if line.startswith("[GNUPG:] "):
return self.parse_status(line)
self.debug('<<STDERR<< %s' % line)
self.outputbuffers["stderr"].append(line)
def parse_keylist(self, keylist):
rlp = GnuPGRecordParser()
return rlp.parse(keylist)
def list_keys(self, selectors=None):
"""
>>> g = GnuPG(None)
>>> g.list_keys()[0]
0
"""
list_keys = ["--fingerprint"]
for sel in selectors or []:
list_keys += ["--list-keys", sel]
if not selectors:
list_keys += ["--list-keys"]
retvals = self.run(list_keys)
return self.parse_keylist(retvals[1]["stdout"])
def list_secret_keys(self, selectors=None):
#
# Note: The "." parameter that is passed is to work around a bug
# in GnuPG < 2.1, where --list-secret-keys does not list
# details about key capabilities or expiry for
# --list-secret-keys unless a selector is provided. A dot
# is reasonably likely to appear in all PGP keys, as it is
# a common component of e-mail addresses (and @ does not
# work as a selector for some reason...)
#
# The downside of this workaround is that keys with no e-mail
# address or an address like alice@localhost won't be found.
# Therefore, this parameter should be removed when GnuPG >= 2.1
# becomes commonplace.
#
# (This is a better workaround than doing an additional
# --list-keys and trying to aggregate it though...)
#
# BRE: Put --fingerprint at the front and added selectors
# for the world's MOST POPULAR LETTERS! Yaaay!
#
if not selectors:
selectors = [".", "a", "e", "i", "p", "t", "k"]
list_keys = ["--fingerprint"]
for sel in selectors:
list_keys += ["--list-secret-keys", sel]
retvals = self.run(list_keys)
secret_keys = self.parse_keylist(retvals[1]["stdout"])
# Another unfortunate thing GPG does, is it hides the disabled
# state when listing secret keys; it seems internally only the
# public key is disabled. This makes it hard for us to reason about
# which keys can actually be used, so we compensate...
list_keys = ["--fingerprint"]
for fprint in secret_keys:
list_keys += ["--list-keys", fprint]
retvals = self.run(list_keys)
public_keys = self.parse_keylist(retvals[1]["stdout"])
for fprint, info in public_keys.iteritems():
if fprint in secret_keys:
for k in ("disabled", "revoked"): # FIXME: Copy more?
secret_keys[fprint][k] = info[k]
return secret_keys
def import_keys(self, key_data=None):
"""
Imports gpg keys from a file object or string.
>>> key_data = open("testing/pub.key").read()
>>> g = GnuPG(None)
>>> g.import_keys(key_data)
{'failed': [], 'updated': [{'details_text': 'unchanged', 'details': 0, 'fingerprint': '08A650B8E2CBC1B02297915DC65626EED13C70DA'}], 'imported': [], 'results': {'sec_dups': 0, 'unchanged': 1, 'num_uids': 0, 'skipped_new_keys': 0, 'no_userids': 0, 'num_signatures': 0, 'num_revoked': 0, 'sec_imported': 0, 'sec_read': 0, 'not_imported': 0, 'count': 1, 'imported_rsa': 0, 'imported': 0, 'num_subkeys': 0}}
"""
retvals = self.run(["--import"], gpg_input=key_data)
return self._parse_import(retvals[1]["status"])
def _parse_import(self, output):
res = {"imported": [], "updated": [], "failed": []}
for x in output:
if x[0] == "IMPORTED":
res["imported"].append({
"fingerprint": x[1],
"username": x[2].rstrip()
})
elif x[0] == "IMPORT_OK":
reasons = {
"0": "unchanged",
"1": "new key",
"2": "new user IDs",
"4": "new signatures",
"8": "new subkeys",
"16": "contains private key",
}
res["updated"].append({
"details": int(x[1]),
"details_text": reasons[x[1]],
"fingerprint": x[2].rstrip(),
})
elif x[0] == "IMPORT_PROBLEM":
reasons = {
"0": "no reason given",
"1": "invalid certificate",
"2": "issuer certificate missing",
"3": "certificate chain too long",
"4": "error storing certificate",
}
res["failed"].append({
"details": int(x[1]),
"details_text": reasons[x[1]],
"fingerprint": x[2].rstrip()
})
elif x[0] == "IMPORT_RES":
res["results"] = {
"count": int(x[1]),
"no_userids": int(x[2]),
"imported": int(x[3]),
"imported_rsa": int(x[4]),
"unchanged": int(x[5]),
"num_uids": int(x[6]),
"num_subkeys": int(x[7]),
"num_signatures": int(x[8]),
"num_revoked": int(x[9]),
"sec_read": int(x[10]),
"sec_imported": int(x[11]),
"sec_dups": int(x[12]),
"skipped_new_keys": int(x[13]),
"not_imported": int(x[14].rstrip()),
}
return res
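# Illustrative _parse_import() behaviour (a minimal sketch; the fingerprint is
# a placeholder for a 40-character hex string):
#
#   [["IMPORT_OK", "1", "<fingerprint>\n"]]
#       -> {"imported": [], "failed": [],
#           "updated": [{"details": 1, "details_text": "new key",
#                        "fingerprint": "<fingerprint>"}]}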
def decrypt(self, data, outputfd=None, passphrase=None, as_lines=False):
"""
Note that this test will fail if you don't replace the recipient with
one whose key you control.
>>> g = GnuPG(None)
>>> ct = g.encrypt("Hello, World", tokeys=["[email protected]"])[1]
>>> g.decrypt(ct)["text"]
'Hello, World'
"""
if passphrase:
self.passphrase = passphrase
action = ["--decrypt"]
retvals = self.run(action, gpg_input=data, outputfd=outputfd,
send_passphrase=True)
self.passphrase = None
if as_lines:
as_lines = retvals[1]["stdout"]
retvals[1]["stdout"] = []
rp = GnuPGResultParser().parse(retvals)
return (rp.signature_info, rp.encryption_info,
as_lines or rp.plaintext)
def remove_armor(self, text):
lines = text.strip().splitlines(True)
if lines[0].startswith(self.ARMOR_BEGIN_SIGNED):
for idx in reversed(range(0, len(lines))):
if lines[idx].startswith(self.ARMOR_BEGIN_SIGNATURE):
lines = lines[:idx]
while lines and lines[0].strip():
lines.pop(0)
break
return ''.join(lines).strip()
def verify(self, data, signature=None):
"""
>>> g = GnuPG(None)
>>> s = g.sign("Hello, World", fromkey="[email protected]",
...            clearsign=True)[1]
>>> g.verify(s)
"""
params = ["--verify"]
if signature:
sig = tempfile.NamedTemporaryFile()
sig.write(signature)
sig.flush()
params.append(sig.name)
params.append("-")
ret, retvals = self.run(params, gpg_input=data, partial_read_ok=True)
return GnuPGResultParser().parse([None, retvals]).signature_info
def encrypt(self, data, tokeys=[], armor=True,
sign=False, fromkey=None):
"""
>>> g = GnuPG(None)
>>> g.encrypt("Hello, World", tokeys=["[email protected]"])[0]
0
"""
action = ["--encrypt", "--yes", "--expert", "--trust-model", "always"]
if armor:
action.append("--armor")
for r in tokeys:
action.append("--recipient")
action.append(r)
if sign:
action.append("--sign")
if sign and fromkey:
action.append("--local-user")
action.append(fromkey)
retvals = self.run(action, gpg_input=data, send_passphrase=sign)
return retvals[0], "".join(retvals[1]["stdout"])
def sign(self, data,
fromkey=None, armor=True, detatch=True, clearsign=False,
passphrase=None):
"""
>>> g = GnuPG(None)
>>> g.sign("Hello, World", fromkey="[email protected]")[0]
0
"""
if passphrase:
self.passphrase = passphrase
if detatch and not clearsign:
action = ["--detach-sign"]
elif clearsign:
action = ["--clearsign"]
else:
action = ["--sign"]
if armor:
action.append("--armor")
if fromkey:
action.append("--local-user")
action.append(fromkey)
retvals = self.run(action, gpg_input=data, send_passphrase=True)
self.passphrase = None
return retvals[0], "".join(retvals[1]["stdout"])
def sign_encrypt(self, data, fromkey=None, tokeys=[], armor=True,
detatch=False, clearsign=True):
retval, signblock = self.sign(data, fromkey=fromkey, armor=armor,
detatch=detatch, clearsign=clearsign)
if detatch:
# TODO: Deal with detached signature.
retval, cryptblock = self.encrypt(data, tokeys=tokeys,
armor=armor)
else:
retval, cryptblock = self.encrypt(signblock, tokeys=tokeys,
armor=armor)
return cryptblock
def sign_key(self, keyid, signingkey=None):
action = ["--yes", "--sign-key", keyid]
if signingkey:
action.insert(1, "-u")
action.insert(2, signingkey)
retvals = self.run(action, send_passphrase=True)
return retvals
def recv_key(self, keyid, keyserver=DEFAULT_SERVER):
retvals = self.run(['--keyserver', keyserver, '--recv-key', keyid])
return self._parse_import(retvals[1]["status"])
def search_key(self, term, keyserver=DEFAULT_SERVER):
retvals = self.run(['--keyserver', keyserver,
'--fingerprint',
'--search-key', self._escape_hex_keyid_term(term)]
)[1]["stdout"]
results = {}
lines = [x.strip().split(":") for x in retvals]
curpub = None
for line in lines:
if line[0] == "info":
pass
elif line[0] == "pub":
curpub = line[1]
validity = line[6]
if line[5]:
if int(line[5]) < time.time():
validity += 'e'
results[curpub] = {
"created": datetime.fromtimestamp(int(line[4])),
"keytype_name": openpgp_algorithms[int(line[2])],
"keysize": line[3],
"validity": validity,
"uids": [],
"fingerprint": curpub
}
elif line[0] == "uid":
email, name, comment = parse_uid(line[1])
results[curpub]["uids"].append({"name": name,
"email": email,
"comment": comment})
return results
def get_pubkey(self, keyid):
retvals = self.run(['--armor',
'--export', keyid]
)[1]["stdout"]
return "".join(retvals)
def address_to_keys(self, address):
res = {}
keys = self.list_keys(selectors=[address])
for key, props in keys.iteritems():
if any([x["email"] == address for x in props["uids"]]):
res[key] = props
return res
def _escape_hex_keyid_term(self, term):
"""Prepends a 0x to hexadecimal key ids, e.g. D13C70DA is converted to 0xD13C70DA.
This is necessary because version 1 and 2 of GnuPG show a different behavior here,
version 1 allows to search without 0x while version 2 requires 0x in front of the key id.
"""
is_hex_keyid = False
if len(term) == GPG_KEYID_LENGTH or len(term) == 2*GPG_KEYID_LENGTH:
hex_digits = set(string.hexdigits)
is_hex_keyid = all(c in hex_digits for c in term)
if is_hex_keyid:
return '0x%s' % term
else:
return term
def chat(self, gpg_args, callback, *args, **kwargs):
"""This lets a callback have a chat with the GPG process..."""
gpg_args = [self.gpgbinary,
"--utf8-strings",
"--no-use-agent",
"--no-tty",
"--command-fd=0",
"--status-fd=1"] + (gpg_args or [])
if self.homedir:
gpg_args.insert(1, "--homedir=%s" % self.homedir)
proc = None
try:
# Here we go!
proc = Popen(gpg_args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
bufsize=0)
return callback(proc, *args, **kwargs)
finally:
# Close this so GPG will terminate. This should already have
# been done, but we're handling errors here...
if proc and proc.stdin:
proc.stdin.close()
if proc:
proc.wait()
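# Illustrative GnuPG usage (a minimal sketch; assumes a configured keychain
# containing a usable key for the placeholder recipient address):
#
#   gpg = GnuPG(None)
#   if gpg.is_available():
#       keys = gpg.list_keys()                     # {fingerprint: key info, ...}
#       rc, ciphertext = gpg.encrypt("Hello", tokeys=["someone@example.com"])
#       sig_info, enc_info, plaintext = gpg.decrypt(ciphertext)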
def GetKeys(gnupg, config, people):
keys = []
missing = []
ambig = []
# First, we go to the contact database and get a list of keys.
for person in set(people):
if '#' in person:
keys.append(person.rsplit('#', 1)[1])
else:
vcard = config.vcards.get_vcard(person)
if vcard:
# It is the VCard's job to give us the best key first.
lines = [vcl for vcl in vcard.get_all('KEY')
if vcl.value.startswith('data:application'
'/x-pgp-fingerprint,')]
if len(lines) > 0:
keys.append(lines[0].value.split(',', 1)[1])
else:
missing.append(person)
else:
missing.append(person)
# Load key data from gnupg for use below
if keys:
all_keys = gnupg.list_keys(selectors=keys)
else:
all_keys = {}
if missing:
# Keys are missing, so we try to just search the keychain
all_keys.update(gnupg.list_keys(selectors=missing))
found = []
for key_id, key in all_keys.iteritems():
for uid in key.get("uids", []):
if uid.get("email", None) in missing:
missing.remove(uid["email"])
found.append(uid["email"])
keys.append(key_id)
elif uid.get("email", None) in found:
ambig.append(uid["email"])
# Next, we go make sure all those keys are really in our keychain.
fprints = all_keys.keys()
for key in keys:
if key.startswith('0x'):
key = key[2:]
if key not in fprints:
match = [k for k in fprints if k.endswith(key)]
if len(match) == 0:
missing.append(key)
elif len(match) > 1:
ambig.append(key)
if missing:
raise KeyLookupError(_('Keys missing for %s'
) % ', '.join(missing), missing)
elif ambig:
ambig = list(set(ambig))
raise KeyLookupError(_('Keys ambiguous for %s'
) % ', '.join(ambig), ambig)
return keys
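# Illustrative GetKeys() inputs (a minimal sketch; the address and fingerprint
# are placeholders):
#
#   GetKeys(gnupg, config, ["alice@example.com"])
#       -> resolves the address via the VCard store, then the GnuPG keychain
#   GetKeys(gnupg, config, ["alice@example.com#<fingerprint>"])
#       -> uses the fingerprint after '#' directly, skipping the VCard lookup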
class OpenPGPMimeSigningWrapper(MimeSigningWrapper):
CONTAINER_PARAMS = (('micalg', 'pgp-sha1'),
('protocol', 'application/pgp-signature'))
SIGNATURE_TYPE = 'application/pgp-signature'
SIGNATURE_DESC = 'OpenPGP Digital Signature'
def crypto(self):
return GnuPG(self.config)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeEncryptingWrapper(MimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def get_keys(self, who):
return GetKeys(self.crypto(), self.config, who)
class OpenPGPMimeSignEncryptWrapper(OpenPGPMimeEncryptingWrapper):
CONTAINER_PARAMS = (('protocol', 'application/pgp-encrypted'), )
ENCRYPTION_TYPE = 'application/pgp-encrypted'
ENCRYPTION_VERSION = 1
def crypto(self):
return GnuPG(self.config)
def _encrypt(self, message_text, tokeys=None, armor=False):
from_key = self.get_keys([self.sender])[0]
return self.crypto().encrypt(message_text,
tokeys=tokeys, armor=True,
sign=True, fromkey=from_key)
def _update_crypto_status(self, part):
part.signature_info.part_status = 'verified'
part.encryption_info.part_status = 'decrypted'
class GnuPGExpectScript(threading.Thread):
STARTUP = 'Startup'
START_GPG = 'Start GPG'
FINISHED = 'Finished'
SCRIPT = []
VARIABLES = {}
RUNNING_STATES = [STARTUP, START_GPG]
def __init__(self, sps=None, logfile=None, variables={}, on_complete=None):
threading.Thread.__init__(self)
self.daemon = True
self._lock = threading.RLock()
self.before = ''
with self._lock:
self.state = self.STARTUP
self.logfile = logfile
self.variables = variables or self.VARIABLES
            self._on_complete = [('default', on_complete)] if on_complete else []
self.gpg = None
self.main_script = self.SCRIPT[:]
self.sps = sps
if sps:
self.variables['passphrase'] = '!!<SPS'
def __str__(self):
return '%s: %s' % (threading.Thread.__str__(self), self.state)
running = property(lambda self: (self.state in self.RUNNING_STATES))
failed = property(lambda self: False)
def __del__(self):
if self.gpg:
self.gpg.close(force=True)
def in_state(self, state):
pass
def set_state(self, state):
self.state = state
self.in_state(state)
def sendline(self, proc, line):
if line == '!!<SPS':
reader = self.sps.get_reader()
while True:
c = reader.read()
if c != '':
proc.stdin.write(c)
else:
proc.stdin.write('\n')
break
else:
proc.stdin.write(line.encode('utf-8'))
proc.stdin.write('\n')
def _expecter(self, proc, exp, timebox):
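        # Read GPG's stdout one byte at a time until `exp` shows up in the
        # accumulated buffer. The shared `timebox` list doubles as a stop flag:
        # expect_exact() zeroes timebox[0] when RunTimed() times out, which
        # makes this loop give up.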
while timebox[0] > 0:
self.before += proc.stdout.read(1)
if exp in self.before:
self.before = self.before.split(exp)[0]
return True
return False
def expect_exact(self, proc, exp, timeout=None):
from mailpile.util import RunTimed, TimedOut
timeout = timeout if (timeout and timeout > 0) else 5
timebox = [timeout]
self.before = ''
try:
if RunTimed(timeout, self._expecter, proc, exp, timebox):
return True
else:
raise TimedOut()
except TimedOut:
timebox[0] = 0
print 'Boo! %s not found in %s' % (exp, self.before)
raise
def run_script(self, proc, script):
for exp, rpl, tmo, state in script:
self.expect_exact(proc, exp, timeout=tmo)
if rpl:
self.sendline(proc, (rpl % self.variables).strip())
if state:
self.set_state(state)
def gpg_args(self):
return ['--no-use-agent', '--list-keys']
def run(self):
try:
self.set_state(self.START_GPG)
GnuPG(None).chat(self.gpg_args(),
self.run_script, self.main_script)
self.set_state(self.FINISHED)
except:
import traceback
traceback.print_exc()
finally:
with self._lock:
if self.gpg is not None:
self.gpg.close(force=(self.state != self.FINISHED))
self.gpg = None
if self.state != self.FINISHED:
self.state = 'Failed: ' + self.state
for name, callback in self._on_complete:
callback()
self._on_complete = None
def on_complete(self, name, callback):
with self._lock:
if self._on_complete is not None:
if name not in [o[0] for o in self._on_complete]:
self._on_complete.append((name, callback))
else:
callback()
class GnuPGKeyGenerator(GnuPGExpectScript):
"""This is a background thread which generates a new PGP key."""
KEY_SETUP = 'Key Setup'
GATHER_ENTROPY = 'Creating key'
CREATED_KEY = 'Created key'
HAVE_KEY = 'Have Key'
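    # Each SCRIPT entry is a 4-tuple consumed by run_script() above:
    #   (expected GPG status text, reply template, timeout, new state)
    # A timeout of -1 falls back to expect_exact()'s 5 second default, an
    # empty/None reply sends nothing, and a None state leaves the state as-is.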
SCRIPT = [
('GET_LINE keygen.algo', '%(keytype)s', -1, KEY_SETUP),
('GET_LINE keygen.size', '%(bits)s', -1, None),
('GET_LINE keygen.valid', '0', -1, None),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOT_IT', None, -1, GATHER_ENTROPY),
('KEY_CREATED', None, 1800, CREATED_KEY),
('\n', None, -1, HAVE_KEY)
]
VARIABLES = {
'keytype': '1',
'bits': '4096',
'name': 'Mailpile Generated Key',
'email': '',
'comment': 'www.mailpile.is',
'passphrase': 'mailpile'
}
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[KEY_SETUP, GATHER_ENTROPY, HAVE_KEY])
failed = property(lambda self: (not self.running and
not self.generated_key))
def __init__(self, *args, **kwargs):
GnuPGExpectScript.__init__(self, *args, **kwargs)
self.generated_key = None
def gpg_args(self):
return ['--no-use-agent', '--gen-key']
def in_state(self, state):
if state == self.HAVE_KEY:
self.generated_key = self.before.strip().split()[-1]
class GnuPGKeyEditor(GnuPGExpectScript):
"""This is a background thread which edits the UIDs on a PGP key."""
HAVE_SKEY = 'Have Secret Key'
DELETING_UID = 'Deleting a UID'
DELETED_UIDS = 'Deleted UIDs'
ADDING_UID = 'Adding a UID'
ADDED_UID = 'Added a UID'
SAVED = 'Saved keychain'
SCRIPT = [
]
DELETE_SCRIPT = [
('GET_LINE keyedit.prompt', 'uid %(n)s', -1, DELETING_UID),
('GET_LINE keyedit.prompt', 'deluid', -1, DELETING_UID),
('GNUPG', 'Y', -1, None),
]
ADD_UID_SCRIPT = [
('GET_LINE keyedit.prompt', 'adduid', -1, ADDING_UID),
('GET_LINE keygen.name', '%(name)s', -1, None),
('GET_LINE keygen.email', '%(email)s', -1, None),
('GET_LINE keygen.comment', '%(comment)s', -1, None),
('GET_HIDDEN passphrase', '%(passphrase)s', -1, None),
('GOOD_PASSPHRASE', '', -1, ADDED_UID),
]
SAVE_SCRIPT = [
('GET_LINE keyedit.prompt', 'save', -1, SAVED),
]
VARIABLES = {
'name': '',
'email': '',
'comment': '',
'passphrase': 'mailpile'
}
RUNNING_STATES = (GnuPGExpectScript.RUNNING_STATES +
[HAVE_SKEY,
DELETING_UID, DELETED_UIDS, ADDING_UID, ADDED_UID])
def __init__(self, keyid, set_uids=None, deletes=5, **kwargs):
GnuPGExpectScript.__init__(self, **kwargs)
self.keyid = keyid
        # First, we try to delete all the existing UIDs.
        # We should be able to delete all but the last one.
for i in reversed(range(2, deletes+1)):
for want, snd, tmo, st in self.DELETE_SCRIPT:
self.main_script.append((want, snd % {'n': i}, tmo, st))
# Next, add scripts to add our new UIDs.
first = True
self.uids = set_uids
for uid in set_uids:
# Magic: the in_state() method updates the variables for each
# instance of this script.
self.main_script.extend(self.ADD_UID_SCRIPT)
if first:
# We added one, so we can delete the last of the old ones
for want, snd, tmo, st in self.DELETE_SCRIPT:
self.main_script.append((want, snd % {'n': 1}, tmo, st))
first = False
self.main_script.extend(self.SAVE_SCRIPT)
def in_state(self, state):
if state == self.ADDING_UID:
self.variables = {}
self.variables.update(self.VARIABLES)
self.variables.update(self.uids.pop(0))
if not self.variables.get('name'):
self.variables['name'] = 'An Ony Mouse'
if len(self.variables['name']) < 5:
self.variables['name'] += ' ....'
if self.sps:
self.variables['passphrase'] = '!!<SPS'
def gpg_args(self):
return ['--no-use-agent', '--edit-key', self.keyid]
|
py | b40fa3cc19247fc1840dad4c87c6790116bc0cd0 | # -*- encoding:utf-8 -*-
'''
Monitoring agent template.
'''
import os
import sys
import json
import yaml
import requests
import logging
from logging.handlers import RotatingFileHandler
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'ansible_util'))
from ansible_util import AnsiblePlayTask
CONFIG = {}
def init_log():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = RotatingFileHandler(os.path.join(BASE_DIR, 'log', 'agent.log'),
maxBytes=1024 * 1024 * 50,
backupCount=10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
class Agent(object):
def __init__(self):
self.config = self.__get_config()
self.ip_infos = self.config['IPS']
self.logger = init_log()
self.report_server = 'http://{server_ip}/api/pings/?service={service}&&data='.format(
server_ip=self.config['SERVER']['IP'],
service=self.config['SERVER']['SERVICE'],
)
self.auth = (self.config['SERVER']['USER'], self.config['SERVER']['PASSWD'])
def __get_config(self):
config = {}
with open(os.path.join(BASE_DIR, 'config.yaml')) as f:
            config = yaml.safe_load(f.read())
return config
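    # Illustrative config.yaml layout, inferred from the keys this class reads
    # (values below are placeholders, not a real deployment):
    #
    #   SERVER:
    #     IP: 192.0.2.10
    #     SERVICE: my-service
    #     USER: monitor
    #     PASSWD: secret
    #   AGENT:
    #     SSH:
    #       KEY: /home/monitor/.ssh/id_rsa
    #   IPS:
    #     192.0.2.21:
    #       BASIC: {CPU: 90, MEM: 90, DISK: 90}
    #       PROCESS: [nginx, mysqld]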
def send_heartbeat(self, error_msg=''):
try:
r = requests.post(self.report_server + error_msg, auth=self.auth)
except Exception as e:
self.logger.exception(e)
def get_playbook(self):
return os.path.join(BASE_DIR, 'playbook.yml')
def get_extra_vars(self):
return {
'script': os.path.join(BASE_DIR, 'script.py')
}
def get_ok_result(self, ok_results):
for ok_result in ok_results:
if ok_result["invocation"]["module_name"] == "script":
return ok_result["stdout"]
def analyze(self, ip, result, is_ok=True):
def has_process(p, processes):
for process in processes:
if p in process:
return True
return False
if not is_ok:
return '{}:{}'.format(ip, result[:100])
errors = []
        # 1. Read the alert thresholds from the config
ip_info = self.ip_infos[ip]
mem_percent = ip_info['BASIC']['MEM']
disk_percent = ip_info['BASIC']['DISK']
cpu_percent = ip_info['BASIC']['CPU']
process_list = ip_info['PROCESS']
        # 2. Compare the machine's latest data against the thresholds
machine_info = json.loads(result.strip())
if machine_info['cpu']['percent'] > cpu_percent:
errors.append('cpu:{}'.format(machine_info['cpu']['percent']))
if machine_info['memory']['percent'] > mem_percent:
errors.append('mem:{}'.format(machine_info['memory']['percent']))
for disk_item in machine_info['disk']:
if disk_item['percent'] > disk_percent:
errors.append('{}:{}'.format(disk_item['device'], disk_item['percent']))
for process in process_list:
if not has_process(process, machine_info['process']):
errors.append('process:{}'.format(process))
        # 3. Aggregate the results
if not errors:
return
return '{}:{}'.format(ip, ",".join(errors))
def run_imp(self):
play_task = AnsiblePlayTask(host_list=self.ip_infos.keys(),
playbook_path=self.get_playbook(),
private_key_file=self.config['AGENT']['SSH']['KEY'],
extra_vars=self.get_extra_vars()
)
play_task.run()
errors = []
for ip, stat in play_task.summary.items():
            # The number of "ok" results varies with the number of actions in the
            # playbook, so success is judged by the absence of errors instead.
if not sum((stat["unreachable"], stat["skipped"], stat["failures"])):
result = self.get_ok_result(play_task.results[ip]["ok"])
ret = self.analyze(ip, result)
else:
result = play_task.results[ip]
ret = self.analyze(ip, result, False)
if not ret:
continue
errors.append(ret)
if not errors:
self.send_heartbeat()
else:
self.logger.error(','.join(errors))
self.send_heartbeat(','.join(errors))
def run(self):
self.logger.info('begin monitor')
try:
self.run_imp()
except Exception as e:
self.logger.exception(e)
self.logger.info('end monitor')
if __name__ == '__main__':
a = Agent()
a.run()
|
py | b40fa3d755cbd072220b63d11c20cc5097577035 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
GitHub and we'll fix it in our codegen framework. We are not able to accept
pull requests for this class.
"""
class AdRule(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdRule = True
super(AdRule, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
account_id = 'account_id'
created_by = 'created_by'
created_time = 'created_time'
evaluation_spec = 'evaluation_spec'
execution_spec = 'execution_spec'
id = 'id'
name = 'name'
schedule_spec = 'schedule_spec'
status = 'status'
updated_time = 'updated_time'
class Status:
deleted = 'DELETED'
disabled = 'DISABLED'
enabled = 'ENABLED'
has_issues = 'HAS_ISSUES'
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'adrules_library'
# @deprecated api_create is being deprecated
def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.adobjects.adaccount import AdAccount
return AdAccount(api=self._api, fbid=parent_id).create_ad_rules_library(fields, params, batch, success, failure, pending)
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdRule,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'evaluation_spec': 'Object',
'execution_spec': 'Object',
'name': 'string',
'schedule_spec': 'Object',
'status': 'status_enum',
}
enums = {
'status_enum': AdRule.Status.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdRule,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_execute(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/execute',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_history(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adrulehistory import AdRuleHistory
param_types = {
'action': 'action_enum',
'hide_no_changes': 'bool',
'object_id': 'string',
}
enums = {
'action_enum': AdRuleHistory.Action.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/history',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdRuleHistory,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdRuleHistory, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_preview(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/preview',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdRule,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdRule, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'account_id': 'string',
'created_by': 'User',
'created_time': 'datetime',
'evaluation_spec': 'AdRuleEvaluationSpec',
'execution_spec': 'AdRuleExecutionSpec',
'id': 'string',
'name': 'string',
'schedule_spec': 'AdRuleScheduleSpec',
'status': 'string',
'updated_time': 'datetime',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['Status'] = AdRule.Status.__dict__.values()
return field_enum_info
|
py | b40fa4b2bd889cbca6278ccdc5f5ef0fd1e7b2cc | from faker import Factory as FakerFactory
import factory
from db.models.build_jobs import BuildJob, BuildJobStatus
from factories.factory_projects import ProjectFactory
from factories.factory_users import UserFactory
from factories.factorycode_reference import CodeReferenceFactory
from polyaxon_schemas.polyaxonfile.specification import BuildSpecification
fake = FakerFactory.create()
class BuildJobFactory(factory.DjangoModelFactory):
config = BuildSpecification.create_specification({'image': 'busybox'})
user = factory.SubFactory(UserFactory)
project = factory.SubFactory(ProjectFactory)
code_reference = factory.SubFactory(CodeReferenceFactory)
class Meta:
model = BuildJob
class BuildJobStatusFactory(factory.DjangoModelFactory):
job = factory.SubFactory(BuildJobFactory)
class Meta:
model = BuildJobStatus
|
py | b40fa5bdc3b875a225791e7787199b199a881753 | #!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for branching Google Test/Mock wiki pages for a new version.
SYNOPSIS
release_docs.py NEW_RELEASE_VERSION
Google Test and Google Mock's external user documentation is in
interlinked wiki files. When we release a new version of
Google Test or Google Mock, we need to branch the wiki files
such that users of a specific version of Google Test/Mock can
look up documentation relevant for that version. This script
automates that process by:
- branching the current wiki pages (which document the
behavior of the SVN trunk head) to pages for the specified
version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when
NEW_RELEASE_VERSION is 2.6);
- updating the links in the branched files to point to the branched
version (e.g. a link in V2_6_FAQ.wiki that pointed to
Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor).
NOTE: NEW_RELEASE_VERSION must be a NEW version number for
which the wiki pages don't yet exist; otherwise you'll get SVN
errors like "svn: Path 'V1_7_PumpManual.wiki' is not a
directory" when running the script.
EXAMPLE
$ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk
$ scripts/release_docs.py 2.6 # create wiki pages for v2.6
$ svn status # verify the file list
$ svn diff # verify the file contents
$ svn commit -m "release wiki pages for v2.6"
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import common
# Wiki pages that shouldn't be branched for every gtest/gmock release.
GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki']
GMOCK_UNVERSIONED_WIKIS = [
'DesignDoc.wiki',
'DevGuide.wiki',
'KnownIssues.wiki'
]
def DropWikiSuffix(wiki_filename):
"""Removes the .wiki suffix (if any) from the given filename."""
return (wiki_filename[:-len('.wiki')] if wiki_filename.endswith('.wiki')
else wiki_filename)
class WikiBrancher(object):
"""Branches ..."""
def __init__(self, dot_version):
self.project, svn_root_path = common.GetSvnInfo()
if self.project not in ('googletest', 'googlemock'):
sys.exit('This script must be run in a gtest or gmock SVN workspace.')
self.wiki_dir = svn_root_path + '/wiki'
# Turn '2.6' to 'V2_6_'.
self.version_prefix = 'V' + dot_version.replace('.', '_') + '_'
self.files_to_branch = self.GetFilesToBranch()
page_names = [DropWikiSuffix(f) for f in self.files_to_branch]
# A link to Foo.wiki is in one of the following forms:
# [Foo words]
# [Foo#Anchor words]
# [http://code.google.com/.../wiki/Foo words]
# [http://code.google.com/.../wiki/Foo#Anchor words]
# We want to replace 'Foo' with 'V2_6_Foo' in the above cases.
self.search_for_re = re.compile(
# This regex matches either
# [Foo
# or
# /wiki/Foo
# followed by a space or a #, where Foo is the name of an
# unversioned wiki page.
r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names))
self.replace_with = r'\1%s\2\3' % (self.version_prefix,)
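    # For example (using the 2.6 release from the module docstring): with
    # version_prefix 'V2_6_', the substitution turns '[Primer#Anchor words]'
    # into '[V2_6_Primer#Anchor words]' and '/wiki/FAQ ' into '/wiki/V2_6_FAQ '.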
def GetFilesToBranch(self):
"""Returns a list of .wiki file names that need to be branched."""
unversioned_wikis = (GTEST_UNVERSIONED_WIKIS if self.project == 'googletest'
else GMOCK_UNVERSIONED_WIKIS)
return [f for f in os.listdir(self.wiki_dir)
if (f.endswith('.wiki') and
                not re.match(r'^V\d', f) and  # Exclude versioned .wiki files.
f not in unversioned_wikis)]
def BranchFiles(self):
"""Branches the .wiki files needed to be branched."""
print 'Branching %d .wiki files:' % (len(self.files_to_branch),)
os.chdir(self.wiki_dir)
for f in self.files_to_branch:
command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
print command
os.system(command)
def UpdateLinksInBranchedFiles(self):
for f in self.files_to_branch:
source_file = os.path.join(self.wiki_dir, f)
versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
print 'Updating links in %s.' % (versioned_file,)
text = file(source_file, 'r').read()
new_text = self.search_for_re.sub(self.replace_with, text)
file(versioned_file, 'w').write(new_text)
def main():
if len(sys.argv) != 2:
sys.exit(__doc__)
brancher = WikiBrancher(sys.argv[1])
brancher.BranchFiles()
brancher.UpdateLinksInBranchedFiles()
if __name__ == '__main__':
main()
|
py | b40fa750dad17d4b354c9b56024115999b09feca | # Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import pandas as pd
import warnings
def test_df_to_series():
# Ensure metadata is kept when going from df to series
df = pd.read_csv("lux/data/car.csv")
df._repr_html_() # compute metadata
assert df.cardinality is not None
series = df["Weight"]
assert isinstance(series, lux.core.series.LuxSeries), "Derived series is type LuxSeries."
print(df["Weight"]._metadata)
assert df["Weight"]._metadata == [
"_intent",
"data_type",
"unique_values",
"cardinality",
"_rec_info",
"_pandas_only",
"_min_max",
"plotting_style",
"_current_vis",
"_widget",
"_recommendation",
"_prev",
"_history",
"_saved_export",
"name",
], "Metadata is lost when going from Dataframe to Series."
assert df.cardinality is not None, "Metadata is lost when going from Dataframe to Series."
assert series.name == "Weight", "Pandas Series original `name` property not retained."
def test_print_dtypes(global_var):
df = pytest.college_df
with warnings.catch_warnings(record=True) as w:
print(df.dtypes)
assert len(w) == 0, "Warning displayed when printing dtypes"
def test_print_iterrow(global_var):
df = pytest.college_df
with warnings.catch_warnings(record=True) as w:
for index, row in df.iterrows():
print(row)
break
assert len(w) == 0, "Warning displayed when printing iterrow"
|
py | b40fa92e64c45a349b897b9ca56f2f4b8d534582 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List
import torch
from fairseq import utils
from fairseq.data import encoders
from omegaconf import open_dict
from torch import nn
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
"merges.txt": "bpe_merges",
"vocab.json": "bpe_vocab",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
"args": args,
"task": task,
"models": models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, cfg, task, models):
super().__init__()
self.cfg = cfg
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(cfg.generation.replace_unk)
self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
self.bpe = encoders.build_bpe(cfg.bpe)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
        self, sentences: List[str], source_ie=None, beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, source_ie, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos],\
[hypos[0]["ie_num"] for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
source_ie: List = None,
skip_invalid_size_inputs=False,
inference_step_args=None,
prefix_allowed_tokens_fn=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.deepcopy(self.cfg.generation)
with open_dict(gen_args):
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(
self.models,
gen_args,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs,source_ie):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.cfg, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool, source_ie=None,
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths, source_ie),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
|
py | b40fa991b656bb15fcd7ed3b9598125667bbef4f | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
from io import BytesIO
from typing import List, Optional
from unittest.mock import patch
from zipfile import is_zipfile, ZipFile
from tests.integration_tests.insert_chart_mixin import InsertChartMixin
import pytest
import prison
import yaml
from sqlalchemy.sql import func
from freezegun import freeze_time
from sqlalchemy import and_
from superset import db, security_manager
from superset.models.dashboard import Dashboard
from superset.models.core import FavStar, FavStarClassName
from superset.models.reports import ReportSchedule, ReportScheduleType
from superset.models.slice import Slice
from superset.utils.core import backend
from superset.views.base import generate_download_headers
from tests.integration_tests.base_api_tests import ApiOwnersTestCaseMixin
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.importexport import (
chart_config,
database_config,
dashboard_config,
dashboard_export,
dashboard_metadata_config,
dataset_config,
dataset_metadata_config,
)
from tests.integration_tests.utils.get_dashboards import get_dashboards_ids
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
)
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
)
DASHBOARDS_FIXTURE_COUNT = 10
class TestDashboardApi(SupersetTestCase, ApiOwnersTestCaseMixin, InsertChartMixin):
resource_name = "dashboard"
dashboards: List[Dashboard] = []
dashboard_data = {
"dashboard_title": "title1_changed",
"slug": "slug1_changed",
"position_json": '{"b": "B"}',
"css": "css_changed",
"json_metadata": '{"refresh_frequency": 30}',
"published": False,
}
def insert_dashboard(
self,
dashboard_title: str,
slug: Optional[str],
owners: List[int],
roles: List[int] = [],
created_by=None,
slices: Optional[List[Slice]] = None,
position_json: str = "",
css: str = "",
json_metadata: str = "",
published: bool = False,
certified_by: Optional[str] = None,
certification_details: Optional[str] = None,
) -> Dashboard:
obj_owners = list()
obj_roles = list()
slices = slices or []
for owner in owners:
user = db.session.query(security_manager.user_model).get(owner)
obj_owners.append(user)
for role in roles:
role_obj = db.session.query(security_manager.role_model).get(role)
obj_roles.append(role_obj)
dashboard = Dashboard(
dashboard_title=dashboard_title,
slug=slug,
owners=obj_owners,
roles=obj_roles,
position_json=position_json,
css=css,
json_metadata=json_metadata,
slices=slices,
published=published,
created_by=created_by,
certified_by=certified_by,
certification_details=certification_details,
)
db.session.add(dashboard)
db.session.commit()
return dashboard
@pytest.fixture()
def create_dashboards(self):
with self.create_app().app_context():
dashboards = []
admin = self.get_user("admin")
charts = []
half_dash_count = round(DASHBOARDS_FIXTURE_COUNT / 2)
for cx in range(DASHBOARDS_FIXTURE_COUNT):
dashboard = self.insert_dashboard(
f"title{cx}",
f"slug{cx}",
[admin.id],
slices=charts if cx < half_dash_count else [],
certified_by="John Doe",
certification_details="Sample certification",
)
if cx < half_dash_count:
chart = self.insert_chart(f"slice{cx}", [admin.id], 1, params="{}")
charts.append(chart)
dashboard.slices = [chart]
db.session.add(dashboard)
dashboards.append(dashboard)
fav_dashboards = []
for cx in range(half_dash_count):
fav_star = FavStar(
user_id=admin.id, class_name="Dashboard", obj_id=dashboards[cx].id
)
db.session.add(fav_star)
db.session.commit()
fav_dashboards.append(fav_star)
self.dashboards = dashboards
yield dashboards
# rollback changes
for chart in charts:
db.session.delete(chart)
for dashboard in dashboards:
db.session.delete(dashboard)
for fav_dashboard in fav_dashboards:
db.session.delete(fav_dashboard)
db.session.commit()
@pytest.fixture()
def create_dashboard_with_report(self):
with self.create_app().app_context():
admin = self.get_user("admin")
dashboard = self.insert_dashboard(
f"dashboard_report", "dashboard_report", [admin.id]
)
report_schedule = ReportSchedule(
type=ReportScheduleType.REPORT,
name="report_with_dashboard",
crontab="* * * * *",
dashboard=dashboard,
)
db.session.commit()
yield dashboard
# rollback changes
db.session.delete(report_schedule)
db.session.delete(dashboard)
db.session.commit()
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_dashboard_datasets(self):
self.login(username="admin")
uri = "api/v1/dashboard/world_health/datasets"
response = self.get_assert_metric(uri, "get_datasets")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode("utf-8"))
dashboard = Dashboard.get("world_health")
expected_dataset_ids = set([s.datasource_id for s in dashboard.slices])
result = data["result"]
actual_dataset_ids = set([dataset["id"] for dataset in result])
self.assertEqual(actual_dataset_ids, expected_dataset_ids)
expected_values = [0, 1] if backend() == "presto" else [0, 1, 2]
self.assertEqual(result[0]["column_types"], expected_values)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_dashboard_datasets_not_found(self):
self.login(username="alpha")
uri = "api/v1/dashboard/not_found/datasets"
response = self.get_assert_metric(uri, "get_datasets")
self.assertEqual(response.status_code, 404)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_draft_dashboard_datasets(self):
"""
All users should have access to dashboards without roles
"""
self.login(username="gamma")
uri = "api/v1/dashboard/world_health/datasets"
response = self.get_assert_metric(uri, "get_datasets")
self.assertEqual(response.status_code, 200)
@pytest.mark.usefixtures("create_dashboards")
def get_dashboard_by_slug(self):
self.login(username="admin")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.slug}"
response = self.get_assert_metric(uri, "get")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode("utf-8"))
self.assertEqual(data["id"], dashboard.id)
@pytest.mark.usefixtures("create_dashboards")
def get_dashboard_by_bad_slug(self):
self.login(username="admin")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.slug}-bad-slug"
response = self.get_assert_metric(uri, "get")
self.assertEqual(response.status_code, 404)
@pytest.mark.usefixtures("create_dashboards")
def get_draft_dashboard_by_slug(self):
"""
All users should have access to dashboards without roles
"""
self.login(username="gamma")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.slug}"
response = self.get_assert_metric(uri, "get")
self.assertEqual(response.status_code, 200)
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboard_charts(self):
"""
Dashboard API: Test getting charts belonging to a dashboard
"""
self.login(username="admin")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.id}/charts"
response = self.get_assert_metric(uri, "get_charts")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode("utf-8"))
self.assertEqual(len(data["result"]), 1)
self.assertEqual(
data["result"][0]["slice_name"], dashboard.slices[0].slice_name
)
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboard_charts_by_slug(self):
"""
Dashboard API: Test getting charts belonging to a dashboard
"""
self.login(username="admin")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.slug}/charts"
response = self.get_assert_metric(uri, "get_charts")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode("utf-8"))
self.assertEqual(len(data["result"]), 1)
self.assertEqual(
data["result"][0]["slice_name"], dashboard.slices[0].slice_name
)
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboard_charts_not_found(self):
"""
Dashboard API: Test getting charts belonging to a dashboard that does not exist
"""
self.login(username="admin")
bad_id = self.get_nonexistent_numeric_id(Dashboard)
uri = f"api/v1/dashboard/{bad_id}/charts"
response = self.get_assert_metric(uri, "get_charts")
self.assertEqual(response.status_code, 404)
@pytest.mark.usefixtures("create_dashboards")
def test_get_draft_dashboard_charts(self):
"""
All users should have access to draft dashboards without roles
"""
self.login(username="gamma")
dashboard = self.dashboards[0]
uri = f"api/v1/dashboard/{dashboard.id}/charts"
response = self.get_assert_metric(uri, "get_charts")
assert response.status_code == 200
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboard_charts_empty(self):
"""
Dashboard API: Test getting charts belonging to a dashboard without any charts
"""
self.login(username="admin")
# the fixture setup assigns no charts to the second half of dashboards
uri = f"api/v1/dashboard/{self.dashboards[-1].id}/charts"
response = self.get_assert_metric(uri, "get_charts")
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode("utf-8"))
self.assertEqual(data["result"], [])
def test_get_dashboard(self):
"""
Dashboard API: Test get dashboard
"""
admin = self.get_user("admin")
dashboard = self.insert_dashboard(
"title", "slug1", [admin.id], created_by=admin
)
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.get_assert_metric(uri, "get")
self.assertEqual(rv.status_code, 200)
expected_result = {
"certified_by": None,
"certification_details": None,
"changed_by": None,
"changed_by_name": "",
"changed_by_url": "",
"charts": [],
"created_by": {"id": 1, "first_name": "admin", "last_name": "user",},
"id": dashboard.id,
"css": "",
"dashboard_title": "title",
"datasources": [],
"json_metadata": "",
"owners": [
{
"id": 1,
"username": "admin",
"first_name": "admin",
"last_name": "user",
}
],
"roles": [],
"position_json": "",
"published": False,
"url": "/superset/dashboard/slug1/",
"slug": "slug1",
"thumbnail_url": dashboard.thumbnail_url,
}
data = json.loads(rv.data.decode("utf-8"))
self.assertIn("changed_on", data["result"])
self.assertIn("changed_on_delta_humanized", data["result"])
for key, value in data["result"].items():
# We can't assert timestamp values
if key not in ("changed_on", "changed_on_delta_humanized",):
self.assertEqual(value, expected_result[key])
# rollback changes
db.session.delete(dashboard)
db.session.commit()
def test_info_dashboard(self):
"""
Dashboard API: Test info
"""
self.login(username="admin")
uri = "api/v1/dashboard/_info"
rv = self.get_assert_metric(uri, "info")
self.assertEqual(rv.status_code, 200)
def test_info_security_database(self):
"""
Dashboard API: Test info security
"""
self.login(username="admin")
params = {"keys": ["permissions"]}
uri = f"api/v1/dashboard/_info?q={prison.dumps(params)}"
rv = self.get_assert_metric(uri, "info")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert set(data["permissions"]) == {"can_read", "can_write", "can_export"}
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_dashboard_not_found(self):
"""
Dashboard API: Test get dashboard not found
"""
bad_id = self.get_nonexistent_numeric_id(Dashboard)
self.login(username="admin")
uri = f"api/v1/dashboard/{bad_id}"
rv = self.get_assert_metric(uri, "get")
self.assertEqual(rv.status_code, 404)
def test_get_dashboard_no_data_access(self):
"""
Dashboard API: Test get dashboard without data access
"""
admin = self.get_user("admin")
dashboard = self.insert_dashboard("title", "slug1", [admin.id])
self.login(username="gamma")
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.client.get(uri)
assert rv.status_code == 200
# rollback changes
db.session.delete(dashboard)
db.session.commit()
def test_get_dashboards_changed_on(self):
"""
Dashboard API: Test get dashboards changed on
"""
from datetime import datetime
import humanize
with freeze_time("2020-01-01T00:00:00Z"):
admin = self.get_user("admin")
dashboard = self.insert_dashboard("title", "slug1", [admin.id])
self.login(username="admin")
arguments = {
"order_column": "changed_on_delta_humanized",
"order_direction": "desc",
}
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
data["result"][0]["changed_on_delta_humanized"],
humanize.naturaltime(datetime.now()),
)
# rollback changes
db.session.delete(dashboard)
db.session.commit()
def test_get_dashboards_filter(self):
"""
Dashboard API: Test get dashboards filter
"""
admin = self.get_user("admin")
gamma = self.get_user("gamma")
dashboard = self.insert_dashboard("title", "slug1", [admin.id, gamma.id])
self.login(username="admin")
arguments = {
"filters": [{"col": "dashboard_title", "opr": "sw", "value": "ti"}]
}
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 1)
arguments = {
"filters": [
{"col": "owners", "opr": "rel_m_m", "value": [admin.id, gamma.id]}
]
}
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 1)
# rollback changes
db.session.delete(dashboard)
db.session.commit()
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboards_title_or_slug_filter(self):
"""
Dashboard API: Test get dashboards title or slug filter
"""
# Test title filter with ilike
arguments = {
"filters": [
{"col": "dashboard_title", "opr": "title_or_slug", "value": "title1"}
],
"order_column": "dashboard_title",
"order_direction": "asc",
"keys": ["none"],
"columns": ["dashboard_title", "slug"],
}
self.login(username="admin")
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 1)
expected_response = [
{"slug": "slug1", "dashboard_title": "title1"},
]
assert data["result"] == expected_response
# Test slug filter with ilike
arguments["filters"][0]["value"] = "slug2"
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 1)
expected_response = [
{"slug": "slug2", "dashboard_title": "title2"},
]
assert data["result"] == expected_response
self.logout()
self.login(username="gamma")
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 0)
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboards_favorite_filter(self):
"""
Dashboard API: Test get dashboards favorite filter
"""
admin = self.get_user("admin")
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(FavStar.user_id == admin.id, FavStar.class_name == "Dashboard")
)
expected_models = (
db.session.query(Dashboard)
.filter(and_(Dashboard.id.in_(users_favorite_query)))
.order_by(Dashboard.dashboard_title.asc())
.all()
)
arguments = {
"filters": [{"col": "id", "opr": "dashboard_is_favorite", "value": True}],
"order_column": "dashboard_title",
"order_direction": "asc",
"keys": ["none"],
"columns": ["dashboard_title"],
}
self.login(username="admin")
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert len(expected_models) == data["count"]
for i, expected_model in enumerate(expected_models):
assert (
expected_model.dashboard_title == data["result"][i]["dashboard_title"]
)
@pytest.mark.usefixtures("create_dashboards")
def test_get_current_user_favorite_status(self):
"""
Dataset API: Test get current user favorite stars
"""
admin = self.get_user("admin")
users_favorite_ids = [
star.obj_id
for star in db.session.query(FavStar.obj_id)
.filter(
and_(
FavStar.user_id == admin.id,
FavStar.class_name == FavStarClassName.DASHBOARD,
)
)
.all()
]
assert users_favorite_ids
arguments = [dash.id for dash in db.session.query(Dashboard.id).all()]
self.login(username="admin")
uri = f"api/v1/dashboard/favorite_status/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
for res in data["result"]:
if res["id"] in users_favorite_ids:
assert res["value"]
@pytest.mark.usefixtures("create_dashboards")
def test_get_dashboards_not_favorite_filter(self):
"""
Dashboard API: Test get dashboards not favorite filter
"""
admin = self.get_user("admin")
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(FavStar.user_id == admin.id, FavStar.class_name == "Dashboard")
)
expected_models = (
db.session.query(Dashboard)
.filter(and_(~Dashboard.id.in_(users_favorite_query)))
.order_by(Dashboard.dashboard_title.asc())
.all()
)
arguments = {
"filters": [{"col": "id", "opr": "dashboard_is_favorite", "value": False}],
"order_column": "dashboard_title",
"order_direction": "asc",
"keys": ["none"],
"columns": ["dashboard_title"],
}
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
self.login(username="admin")
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert len(expected_models) == data["count"]
for i, expected_model in enumerate(expected_models):
assert (
expected_model.dashboard_title == data["result"][i]["dashboard_title"]
)
@pytest.mark.usefixtures("create_dashboards")
def test_gets_certified_dashboards_filter(self):
arguments = {
"filters": [{"col": "id", "opr": "dashboard_is_certified", "value": True,}],
"keys": ["none"],
"columns": ["dashboard_title"],
}
self.login(username="admin")
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], DASHBOARDS_FIXTURE_COUNT)
@pytest.mark.usefixtures("create_dashboards")
def test_gets_not_certified_dashboards_filter(self):
arguments = {
"filters": [
{"col": "id", "opr": "dashboard_is_certified", "value": False,}
],
"keys": ["none"],
"columns": ["dashboard_title"],
}
self.login(username="admin")
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 6)
def create_dashboard_import(self):
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dashboard_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(dashboard_metadata_config).encode())
with bundle.open(
"dashboard_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dashboard_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
with bundle.open("dashboard_export/charts/imported_chart.yaml", "w") as fp:
fp.write(yaml.safe_dump(chart_config).encode())
with bundle.open(
"dashboard_export/dashboards/imported_dashboard.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dashboard_config).encode())
buf.seek(0)
return buf
def create_invalid_dashboard_import(self):
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("sql/dump.sql", "w") as fp:
fp.write("CREATE TABLE foo (bar INT)".encode())
buf.seek(0)
return buf
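    # Note: the two helpers above build in-memory ZIP files that mimic the
    # layout of Superset's dashboard export bundle (metadata.yaml plus one
    # YAML file per database, dataset, chart and dashboard), while the
    # "invalid" variant ships only an unrelated SQL dump so the import
    # endpoint can be exercised with a bundle it must reject.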
def test_delete_dashboard(self):
"""
Dashboard API: Test delete
"""
admin_id = self.get_user("admin").id
dashboard_id = self.insert_dashboard("title", "slug1", [admin_id]).id
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model, None)
def test_delete_bulk_dashboards(self):
"""
Dashboard API: Test delete bulk
"""
admin_id = self.get_user("admin").id
dashboard_count = 4
dashboard_ids = list()
for dashboard_name_index in range(dashboard_count):
dashboard_ids.append(
self.insert_dashboard(
f"title{dashboard_name_index}",
f"slug{dashboard_name_index}",
[admin_id],
).id
)
self.login(username="admin")
argument = dashboard_ids
uri = f"api/v1/dashboard/?q={prison.dumps(argument)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": f"Deleted {dashboard_count} dashboards"}
self.assertEqual(response, expected_response)
for dashboard_id in dashboard_ids:
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model, None)
def test_delete_bulk_dashboards_bad_request(self):
"""
Dashboard API: Test delete bulk bad request
"""
dashboard_ids = [1, "a"]
self.login(username="admin")
argument = dashboard_ids
uri = f"api/v1/dashboard/?q={prison.dumps(argument)}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 400)
def test_delete_not_found_dashboard(self):
"""
Dashboard API: Test not found delete
"""
self.login(username="admin")
dashboard_id = 1000
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("create_dashboard_with_report")
def test_delete_dashboard_with_report(self):
"""
Dashboard API: Test delete with associated report
"""
self.login(username="admin")
dashboard = (
db.session.query(Dashboard.id)
.filter(Dashboard.dashboard_title == "dashboard_report")
.one_or_none()
)
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.client.delete(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 422)
expected_response = {
"message": "There are associated alerts or reports: report_with_dashboard"
}
self.assertEqual(response, expected_response)
def test_delete_bulk_dashboards_not_found(self):
"""
Dashboard API: Test delete bulk not found
"""
dashboard_ids = [1001, 1002]
self.login(username="admin")
argument = dashboard_ids
uri = f"api/v1/dashboard/?q={prison.dumps(argument)}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("create_dashboard_with_report", "create_dashboards")
def test_delete_bulk_dashboard_with_report(self):
"""
Dashboard API: Test bulk delete with associated report
"""
self.login(username="admin")
dashboard_with_report = (
db.session.query(Dashboard.id)
.filter(Dashboard.dashboard_title == "dashboard_report")
.one_or_none()
)
dashboards = (
db.session.query(Dashboard)
.filter(Dashboard.dashboard_title.like("title%"))
.all()
)
dashboard_ids = [dashboard.id for dashboard in dashboards]
dashboard_ids.append(dashboard_with_report.id)
uri = f"api/v1/dashboard/?q={prison.dumps(dashboard_ids)}"
rv = self.client.delete(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 422)
expected_response = {
"message": "There are associated alerts or reports: report_with_dashboard"
}
self.assertEqual(response, expected_response)
def test_delete_dashboard_admin_not_owned(self):
"""
Dashboard API: Test admin delete not owned
"""
gamma_id = self.get_user("gamma").id
dashboard_id = self.insert_dashboard("title", "slug1", [gamma_id]).id
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model, None)
def test_delete_bulk_dashboard_admin_not_owned(self):
"""
Dashboard API: Test admin delete bulk not owned
"""
gamma_id = self.get_user("gamma").id
dashboard_count = 4
dashboard_ids = list()
for dashboard_name_index in range(dashboard_count):
dashboard_ids.append(
self.insert_dashboard(
f"title{dashboard_name_index}",
f"slug{dashboard_name_index}",
[gamma_id],
).id
)
self.login(username="admin")
argument = dashboard_ids
uri = f"api/v1/dashboard/?q={prison.dumps(argument)}"
rv = self.client.delete(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
expected_response = {"message": f"Deleted {dashboard_count} dashboards"}
self.assertEqual(response, expected_response)
for dashboard_id in dashboard_ids:
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model, None)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_delete_dashboard_not_owned(self):
"""
Dashboard API: Test delete try not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
existing_slice = (
db.session.query(Slice).filter_by(slice_name="Girl Name Cloud").first()
)
dashboard = self.insert_dashboard(
"title", "slug1", [user_alpha1.id], slices=[existing_slice], published=True
)
self.login(username="alpha2", password="password")
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 403)
db.session.delete(dashboard)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_delete_bulk_dashboard_not_owned(self):
"""
Dashboard API: Test delete bulk try not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
existing_slice = (
db.session.query(Slice).filter_by(slice_name="Girl Name Cloud").first()
)
dashboard_count = 4
dashboards = list()
for dashboard_name_index in range(dashboard_count):
dashboards.append(
self.insert_dashboard(
f"title{dashboard_name_index}",
f"slug{dashboard_name_index}",
[user_alpha1.id],
slices=[existing_slice],
published=True,
)
)
owned_dashboard = self.insert_dashboard(
"title_owned",
"slug_owned",
[user_alpha2.id],
slices=[existing_slice],
published=True,
)
self.login(username="alpha2", password="password")
        # verify we can't delete dashboards we don't own
arguments = [dashboard.id for dashboard in dashboards]
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 403)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": "Forbidden"}
self.assertEqual(response, expected_response)
        # nothing is deleted when the bulk request mixes owned and not-owned dashboards
arguments = [dashboard.id for dashboard in dashboards] + [owned_dashboard.id]
uri = f"api/v1/dashboard/?q={prison.dumps(arguments)}"
rv = self.client.delete(uri)
self.assertEqual(rv.status_code, 403)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": "Forbidden"}
self.assertEqual(response, expected_response)
for dashboard in dashboards:
db.session.delete(dashboard)
db.session.delete(owned_dashboard)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
def test_create_dashboard(self):
"""
Dashboard API: Test create dashboard
"""
admin_id = self.get_user("admin").id
dashboard_data = {
"dashboard_title": "title1",
"slug": "slug1",
"owners": [admin_id],
"position_json": '{"a": "A"}',
"css": "css",
"json_metadata": '{"refresh_frequency": 30}',
"published": True,
}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.post_assert_metric(uri, dashboard_data, "post")
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Dashboard).get(data.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_simple_dashboard(self):
"""
Dashboard API: Test create simple dashboard
"""
dashboard_data = {"dashboard_title": "title1"}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Dashboard).get(data.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_dashboard_empty(self):
"""
Dashboard API: Test create empty
"""
dashboard_data = {}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Dashboard).get(data.get("id"))
db.session.delete(model)
db.session.commit()
dashboard_data = {"dashboard_title": ""}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Dashboard).get(data.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_dashboard_validate_title(self):
"""
Dashboard API: Test create dashboard validate title
"""
dashboard_data = {"dashboard_title": "a" * 600}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.post_assert_metric(uri, dashboard_data, "post")
self.assertEqual(rv.status_code, 400)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": {"dashboard_title": ["Length must be between 0 and 500."]}
}
self.assertEqual(response, expected_response)
def test_create_dashboard_validate_slug(self):
"""
Dashboard API: Test create validate slug
"""
admin_id = self.get_user("admin").id
dashboard = self.insert_dashboard("title1", "slug1", [admin_id])
self.login(username="admin")
# Check for slug uniqueness
dashboard_data = {"dashboard_title": "title2", "slug": "slug1"}
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"slug": ["Must be unique"]}}
self.assertEqual(response, expected_response)
# Check for slug max size
dashboard_data = {"dashboard_title": "title2", "slug": "a" * 256}
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 400)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"slug": ["Length must be between 1 and 255."]}}
self.assertEqual(response, expected_response)
db.session.delete(dashboard)
db.session.commit()
def test_create_dashboard_validate_owners(self):
"""
Dashboard API: Test create validate owners
"""
dashboard_data = {"dashboard_title": "title1", "owners": [1000]}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"owners": ["Owners are invalid"]}}
self.assertEqual(response, expected_response)
def test_create_dashboard_validate_roles(self):
"""
Dashboard API: Test create validate roles
"""
dashboard_data = {"dashboard_title": "title1", "roles": [1000]}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"roles": ["Some roles do not exist"]}}
self.assertEqual(response, expected_response)
def test_create_dashboard_validate_json(self):
"""
Dashboard API: Test create validate json
"""
dashboard_data = {"dashboard_title": "title1", "position_json": '{"A:"a"}'}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 400)
dashboard_data = {"dashboard_title": "title1", "json_metadata": '{"A:"a"}'}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 400)
dashboard_data = {
"dashboard_title": "title1",
"json_metadata": '{"refresh_frequency": "A"}',
}
self.login(username="admin")
uri = "api/v1/dashboard/"
rv = self.client.post(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 400)
def test_update_dashboard(self):
"""
Dashboard API: Test update
"""
admin = self.get_user("admin")
admin_role = self.get_role("Admin")
dashboard_id = self.insert_dashboard(
"title1", "slug1", [admin.id], roles=[admin_role.id]
).id
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.put_assert_metric(uri, self.dashboard_data, "put")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model.dashboard_title, self.dashboard_data["dashboard_title"])
self.assertEqual(model.slug, self.dashboard_data["slug"])
self.assertEqual(model.position_json, self.dashboard_data["position_json"])
self.assertEqual(model.css, self.dashboard_data["css"])
self.assertEqual(model.json_metadata, self.dashboard_data["json_metadata"])
self.assertEqual(model.published, self.dashboard_data["published"])
self.assertEqual(model.owners, [admin])
self.assertEqual(model.roles, [admin_role])
db.session.delete(model)
db.session.commit()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_update_dashboard_chart_owners(self):
"""
Dashboard API: Test update chart owners
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
admin = self.get_user("admin")
slices = []
slices.append(
db.session.query(Slice).filter_by(slice_name="Girl Name Cloud").first()
)
slices.append(db.session.query(Slice).filter_by(slice_name="Trends").first())
slices.append(db.session.query(Slice).filter_by(slice_name="Boys").first())
dashboard = self.insert_dashboard("title1", "slug1", [admin.id], slices=slices,)
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard.id}"
dashboard_data = {"owners": [user_alpha1.id, user_alpha2.id]}
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
        # verify the slice owners now include the alpha1 and alpha2 users
slices_ids = [slice.id for slice in slices]
# Refetch Slices
slices = db.session.query(Slice).filter(Slice.id.in_(slices_ids)).all()
for slice in slices:
self.assertIn(user_alpha1, slice.owners)
self.assertIn(user_alpha2, slice.owners)
self.assertNotIn(admin, slice.owners)
# Revert owners on slice
slice.owners = []
db.session.commit()
# Rollback changes
db.session.delete(dashboard)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
def test_update_partial_dashboard(self):
"""
Dashboard API: Test update partial
"""
admin_id = self.get_user("admin").id
dashboard_id = self.insert_dashboard("title1", "slug1", [admin_id]).id
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.put(
uri, json={"json_metadata": self.dashboard_data["json_metadata"]}
)
self.assertEqual(rv.status_code, 200)
rv = self.client.put(
uri, json={"dashboard_title": self.dashboard_data["dashboard_title"]}
)
self.assertEqual(rv.status_code, 200)
rv = self.client.put(uri, json={"slug": self.dashboard_data["slug"]})
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model.json_metadata, self.dashboard_data["json_metadata"])
self.assertEqual(model.dashboard_title, self.dashboard_data["dashboard_title"])
self.assertEqual(model.slug, self.dashboard_data["slug"])
db.session.delete(model)
db.session.commit()
def test_update_dashboard_new_owner_not_admin(self):
"""
Dashboard API: Test update set new owner implicitly adds logged in owner
"""
gamma = self.get_user("gamma")
alpha = self.get_user("alpha")
dashboard_id = self.insert_dashboard("title1", "slug1", [alpha.id]).id
dashboard_data = {"dashboard_title": "title1_changed", "owners": [gamma.id]}
self.login(username="alpha")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertIn(gamma, model.owners)
self.assertIn(alpha, model.owners)
for slc in model.slices:
self.assertIn(gamma, slc.owners)
self.assertIn(alpha, slc.owners)
db.session.delete(model)
db.session.commit()
def test_update_dashboard_new_owner_admin(self):
"""
Dashboard API: Test update set new owner as admin to other than current user
"""
gamma = self.get_user("gamma")
admin = self.get_user("admin")
dashboard_id = self.insert_dashboard("title1", "slug1", [admin.id]).id
dashboard_data = {"dashboard_title": "title1_changed", "owners": [gamma.id]}
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertIn(gamma, model.owners)
self.assertNotIn(admin, model.owners)
for slc in model.slices:
self.assertIn(gamma, slc.owners)
self.assertNotIn(admin, slc.owners)
db.session.delete(model)
db.session.commit()
def test_update_dashboard_slug_formatting(self):
"""
Dashboard API: Test update slug formatting
"""
admin_id = self.get_user("admin").id
dashboard_id = self.insert_dashboard("title1", "slug1", [admin_id]).id
dashboard_data = {"dashboard_title": "title1_changed", "slug": "slug1 changed"}
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard_id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard_id)
self.assertEqual(model.dashboard_title, "title1_changed")
self.assertEqual(model.slug, "slug1-changed")
db.session.delete(model)
db.session.commit()
def test_update_dashboard_validate_slug(self):
"""
Dashboard API: Test update validate slug
"""
admin_id = self.get_user("admin").id
dashboard1 = self.insert_dashboard("title1", "slug-1", [admin_id])
dashboard2 = self.insert_dashboard("title2", "slug-2", [admin_id])
self.login(username="admin")
# Check for slug uniqueness
dashboard_data = {"dashboard_title": "title2", "slug": "slug 1"}
uri = f"api/v1/dashboard/{dashboard2.id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"slug": ["Must be unique"]}}
self.assertEqual(response, expected_response)
db.session.delete(dashboard1)
db.session.delete(dashboard2)
db.session.commit()
dashboard1 = self.insert_dashboard("title1", None, [admin_id])
dashboard2 = self.insert_dashboard("title2", None, [admin_id])
self.login(username="admin")
        # Accept empty slugs and don't validate them as unique
dashboard_data = {"dashboard_title": "title2_changed", "slug": ""}
uri = f"api/v1/dashboard/{dashboard2.id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
db.session.delete(dashboard1)
db.session.delete(dashboard2)
db.session.commit()
def test_update_published(self):
"""
Dashboard API: Test update published patch
"""
admin = self.get_user("admin")
gamma = self.get_user("gamma")
dashboard = self.insert_dashboard("title1", "slug1", [admin.id, gamma.id])
dashboard_data = {"published": True}
self.login(username="admin")
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.client.put(uri, json=dashboard_data)
self.assertEqual(rv.status_code, 200)
model = db.session.query(Dashboard).get(dashboard.id)
self.assertEqual(model.published, True)
self.assertEqual(model.slug, "slug1")
self.assertIn(admin, model.owners)
self.assertIn(gamma, model.owners)
db.session.delete(model)
db.session.commit()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_update_dashboard_not_owned(self):
"""
Dashboard API: Test update dashboard not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
existing_slice = (
db.session.query(Slice).filter_by(slice_name="Girl Name Cloud").first()
)
dashboard = self.insert_dashboard(
"title", "slug1", [user_alpha1.id], slices=[existing_slice], published=True
)
self.login(username="alpha2", password="password")
dashboard_data = {"dashboard_title": "title1_changed", "slug": "slug1 changed"}
uri = f"api/v1/dashboard/{dashboard.id}"
rv = self.put_assert_metric(uri, dashboard_data, "put")
self.assertEqual(rv.status_code, 403)
db.session.delete(dashboard)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
@pytest.mark.usefixtures(
"load_world_bank_dashboard_with_slices",
"load_birth_names_dashboard_with_slices",
)
def test_export(self):
"""
Dashboard API: Test dashboard export
"""
self.login(username="admin")
dashboards_ids = get_dashboards_ids(db, ["world_health", "births"])
uri = f"api/v1/dashboard/export/?q={prison.dumps(dashboards_ids)}"
# freeze time to ensure filename is deterministic
with freeze_time("2020-01-01T00:00:00Z"):
rv = self.get_assert_metric(uri, "export")
headers = generate_download_headers("json")["Content-Disposition"]
assert rv.status_code == 200
assert rv.headers["Content-Disposition"] == headers
def test_export_not_found(self):
"""
Dashboard API: Test dashboard export not found
"""
self.login(username="admin")
argument = [1000]
uri = f"api/v1/dashboard/export/?q={prison.dumps(argument)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_export_not_allowed(self):
"""
Dashboard API: Test dashboard export not allowed
"""
admin_id = self.get_user("admin").id
dashboard = self.insert_dashboard("title", "slug1", [admin_id], published=False)
self.login(username="gamma")
argument = [dashboard.id]
uri = f"api/v1/dashboard/export/?q={prison.dumps(argument)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
db.session.delete(dashboard)
db.session.commit()
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
def test_export_bundle(self):
"""
        Dashboard API: Test dashboard export as a ZIP bundle
"""
dashboards_ids = get_dashboards_ids(db, ["world_health", "births"])
uri = f"api/v1/dashboard/export/?q={prison.dumps(dashboards_ids)}"
self.login(username="admin")
rv = self.client.get(uri)
assert rv.status_code == 200
buf = BytesIO(rv.data)
assert is_zipfile(buf)
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
def test_export_bundle_not_found(self):
"""
Dashboard API: Test dashboard export not found
"""
self.login(username="admin")
argument = [1000]
uri = f"api/v1/dashboard/export/?q={prison.dumps(argument)}"
rv = self.client.get(uri)
assert rv.status_code == 404
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"VERSIONED_EXPORT": True},
clear=True,
)
def test_export_bundle_not_allowed(self):
"""
Dashboard API: Test dashboard export not allowed
"""
admin_id = self.get_user("admin").id
dashboard = self.insert_dashboard("title", "slug1", [admin_id], published=False)
self.login(username="gamma")
argument = [dashboard.id]
uri = f"api/v1/dashboard/export/?q={prison.dumps(argument)}"
rv = self.client.get(uri)
assert rv.status_code == 404
db.session.delete(dashboard)
db.session.commit()
def test_import_dashboard(self):
"""
Dashboard API: Test import dashboard
"""
self.login(username="admin")
uri = "api/v1/dashboard/import/"
buf = self.create_dashboard_import()
form_data = {
"formData": (buf, "dashboard_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
dashboard = (
db.session.query(Dashboard).filter_by(uuid=dashboard_config["uuid"]).one()
)
assert dashboard.dashboard_title == "Test dash"
assert len(dashboard.slices) == 1
chart = dashboard.slices[0]
assert str(chart.uuid) == chart_config["uuid"]
dataset = chart.table
assert str(dataset.uuid) == dataset_config["uuid"]
database = dataset.database
assert str(database.uuid) == database_config["uuid"]
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dashboard_invalid_file(self):
"""
Dashboard API: Test import invalid dashboard file
"""
self.login(username="admin")
uri = "api/v1/dashboard/import/"
buf = self.create_invalid_dashboard_import()
form_data = {
"formData": (buf, "dashboard_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 400
assert response == {
"errors": [
{
"message": "No valid import files were found",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"issue_codes": [
{
"code": 1010,
"message": (
"Issue 1010 - Superset encountered an "
"error while running a command."
),
}
]
},
}
]
}
def test_import_dashboard_v0_export(self):
num_dashboards = db.session.query(Dashboard).count()
self.login(username="admin")
uri = "api/v1/dashboard/import/"
buf = BytesIO()
buf.write(json.dumps(dashboard_export).encode())
buf.seek(0)
form_data = {
"formData": (buf, "20201119_181105.json"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
assert db.session.query(Dashboard).count() == num_dashboards + 1
dashboard = (
db.session.query(Dashboard).filter_by(dashboard_title="Births 2").one()
)
chart = dashboard.slices[0]
dataset = chart.table
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.commit()
def test_import_dashboard_overwrite(self):
"""
Dashboard API: Test import existing dashboard
"""
self.login(username="admin")
uri = "api/v1/dashboard/import/"
buf = self.create_dashboard_import()
form_data = {
"formData": (buf, "dashboard_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# import again without overwrite flag
buf = self.create_dashboard_import()
form_data = {
"formData": (buf, "dashboard_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dashboard",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"dashboards/imported_dashboard.yaml": "Dashboard already exists and `overwrite=true` was not passed",
"issue_codes": [
{
"code": 1010,
"message": (
"Issue 1010 - Superset encountered an "
"error while running a command."
),
}
],
},
}
]
}
# import with overwrite flag
buf = self.create_dashboard_import()
form_data = {
"formData": (buf, "dashboard_export.zip"),
"overwrite": "true",
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# cleanup
dashboard = (
db.session.query(Dashboard).filter_by(uuid=dashboard_config["uuid"]).one()
)
chart = dashboard.slices[0]
dataset = chart.table
database = dataset.database
db.session.delete(dashboard)
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_dashboard_invalid(self):
"""
Dashboard API: Test import invalid dashboard
"""
self.login(username="admin")
uri = "api/v1/dashboard/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("dashboard_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_metadata_config).encode())
with bundle.open(
"dashboard_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open(
"dashboard_export/datasets/imported_dataset.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
with bundle.open("dashboard_export/charts/imported_chart.yaml", "w") as fp:
fp.write(yaml.safe_dump(chart_config).encode())
with bundle.open(
"dashboard_export/dashboards/imported_dashboard.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(dashboard_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "dashboard_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"errors": [
{
"message": "Error importing dashboard",
"error_type": "GENERIC_COMMAND_ERROR",
"level": "warning",
"extra": {
"metadata.yaml": {"type": ["Must be equal to Dashboard."]},
"issue_codes": [
{
"code": 1010,
"message": (
"Issue 1010 - Superset encountered "
"an error while running a command."
),
}
],
},
}
]
}
def test_get_all_related_roles(self):
"""
        API: Test get all related roles
"""
self.login(username="admin")
uri = f"api/v1/dashboard/related/roles"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
roles = db.session.query(security_manager.role_model).all()
expected_roles = [str(role) for role in roles]
assert response["count"] == len(roles)
response_roles = [result["text"] for result in response["result"]]
for expected_role in expected_roles:
assert expected_role in response_roles
def test_get_filter_related_roles(self):
"""
API: Test get filter related roles
"""
self.login(username="admin")
argument = {"filter": "alpha"}
uri = f"api/v1/dashboard/related/roles?q={prison.dumps(argument)}"
rv = self.client.get(uri)
assert rv.status_code == 200
response = json.loads(rv.data.decode("utf-8"))
assert response["count"] == 1
response_roles = [result["text"] for result in response["result"]]
assert "Alpha" in response_roles
|
py | b40fabe1b86063ddbbe77815312d7206d63fa421 | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
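# The four wrappers above derive the operators missing from the imported
# MathSAT bindings: a >= b is encoded as msat_make_leq(b, a), a < b as the
# negation of a >= b, a > b as the negation of a <= b, and the implication
# a -> b as (not a) or b.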
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
    G_F_inc_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_inc_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
return frozenset(res)
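# A minimal, self-contained sketch (not part of the original benchmark) that
# replays the transition relation encoded in check_ltl() with plain Python
# values, to illustrate the dynamics the LTL property talks about: r never
# changes, i may be incremented while it is below l, and once i reaches l it
# resets to 0 while l grows by one.
if __name__ == "__main__":
    i, r, l = 0.0, 0.5, 1.0  # any assignment satisfying the init constraints
    inc_i = False
    for _ in range(10):
        if i < l:
            inc_i = True      # take the "increment i" branch of the disjunction
            i = i + 1
        else:
            i, l, inc_i = 0.0, l + 1, False
        print(f"step: i={i} r={r} l={l} inc_i={inc_i}")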
|
py | b40fac1f50dea0b4cb613856e8a8d00f165e1543 | import inspect
import itertools
import logging
import time
from typing import (
Any,
Callable,
List,
Iterator,
Iterable,
Generic,
Union,
Optional,
TYPE_CHECKING,
)
import ray
from ray.data.context import DatasetContext
from ray.data.dataset import Dataset, T, U
from ray.data.impl.pipeline_executor import (
PipelineExecutor,
PipelineSplitExecutorCoordinator,
)
from ray.data.block import Block
from ray.data.row import TableRow
from ray.data.impl import progress_bar
from ray.data.impl.block_batching import batch_blocks, BatchType
from ray.data.impl.block_list import BlockList
from ray.data.impl.plan import ExecutionPlan
from ray.data.impl.stats import DatasetPipelineStats, DatasetStats
from ray.util.annotations import PublicAPI, DeveloperAPI
if TYPE_CHECKING:
import pyarrow
logger = logging.getLogger(__name__)
# Operations that can be naively applied per dataset row in the pipeline.
_PER_DATASET_OPS = ["map", "map_batches", "add_column", "flat_map", "filter"]
# Operations that apply to each dataset holistically in the pipeline.
_HOLISTIC_PER_DATASET_OPS = ["repartition", "random_shuffle", "sort"]
# Similar to above but we should force evaluation immediately.
_PER_DATASET_OUTPUT_OPS = [
"write_json",
"write_csv",
"write_parquet",
"write_datasource",
]
# Operations that operate over the stream of output batches from the pipeline.
_OUTPUT_ITER_OPS = ["take", "take_all", "show", "to_tf", "to_torch"]
@PublicAPI
class DatasetPipeline(Generic[T]):
"""Implements a pipeline of Datasets.
DatasetPipelines implement pipelined execution. This allows for the
overlapped execution of data input (e.g., reading files), computation
(e.g. feature preprocessing), and output (e.g., distributed ML training).
A DatasetPipeline can be created by either repeating a Dataset
(``ds.repeat(times=None)``), by turning a single Dataset into a pipeline
    (``ds.window(blocks_per_window=10)``), or by defining it explicitly using
``DatasetPipeline.from_iterable()``.
    DatasetPipeline supports all the per-record transforms of Datasets
(e.g., map, flat_map, filter), holistic transforms (e.g., repartition),
and output methods (e.g., iter_rows, to_tf, to_torch, write_datasource).
"""
def __init__(
self,
base_iterable: Iterable[Callable[[], Dataset[T]]],
stages: List[Callable[[Dataset[Any]], Dataset[Any]]] = None,
length: int = None,
progress_bars: bool = progress_bar._enabled,
_executed: List[bool] = None,
):
"""Construct a DatasetPipeline (internal API).
The constructor is not part of the DatasetPipeline API. Use the
``Dataset.repeat()``, ``Dataset.window()``, or
``DatasetPipeline.from_iterable()`` methods to construct a pipeline.
"""
self._base_iterable = base_iterable
self._stages = stages or []
self._optimized_stages = None
self._length = length
self._progress_bars = progress_bars
self._uuid = None # For testing only.
# Whether the pipeline execution has started.
# This variable is shared across all pipelines descending from this.
self._executed = _executed or [False]
self._dataset_iter = None
self._first_dataset = None
self._schema = None
self._stats = DatasetPipelineStats()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]:
"""Return a local row iterator over the data in the pipeline.
If the dataset is a tabular dataset (Arrow/Pandas blocks), dict-like mappings
:py:class:`~ray.data.row.TableRow` are yielded for each row by the iterator.
If the dataset is not tabular, the raw row is yielded.
Examples:
>>> import ray
>>> for i in ray.data.range(1000000).repeat(5).iter_rows(): # doctest: +SKIP
... print(i) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
Returns:
A local iterator over the records in the pipeline.
"""
def gen_rows() -> Iterator[Union[T, TableRow]]:
time_start = time.perf_counter()
for ds in self.iter_datasets():
wait_start = time.perf_counter()
for row in ds.iter_rows(prefetch_blocks=prefetch_blocks):
self._stats.iter_wait_s.add(time.perf_counter() - wait_start)
with self._stats.iter_user_s.timer():
yield row
wait_start = time.perf_counter()
self._stats.iter_total_s.add(time.perf_counter() - time_start)
return gen_rows()
def iter_batches(
self,
*,
prefetch_blocks: int = 0,
batch_size: int = None,
batch_format: str = "native",
drop_last: bool = False,
) -> Iterator[BatchType]:
"""Return a local batched iterator over the data in the pipeline.
Examples:
>>> import ray
>>> ds = ray.data.range(1000000).repeat(5) # doctest: +SKIP
>>> for pandas_df in ds.iter_batches(): # doctest: +SKIP
... print(pandas_df) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "native" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
Returns:
An iterator over record batches.
"""
time_start = time.perf_counter()
yield from batch_blocks(
self._iter_blocks(),
self._stats,
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format=batch_format,
drop_last=drop_last,
)
self._stats.iter_total_s.add(time.perf_counter() - time_start)
def _iter_blocks(self) -> Iterator[Block]:
ds_wait_start = time.perf_counter()
for ds in self.iter_datasets():
self._stats.iter_ds_wait_s.add(time.perf_counter() - ds_wait_start)
yield from ds._plan.execute().iter_blocks()
ds_wait_start = time.perf_counter()
def split(
self, n: int, *, equal: bool = False, locality_hints: List[Any] = None
) -> List["DatasetPipeline[T]"]:
"""Split the pipeline into ``n`` disjoint pipeline shards.
This returns a list of sub-pipelines that can be passed to Ray tasks
and actors and used to read the pipeline records in parallel.
Examples:
>>> import ray
>>> pipe = ray.data.range(10).repeat(50) # doctest: +SKIP
>>> workers = ... # doctest: +SKIP
>>> # Split up a pipeline to process over `n` worker actors.
>>> shards = pipe.split( # doctest: +SKIP
... len(workers), locality_hints=workers)
>>> for shard, worker in zip(shards, workers): # doctest: +SKIP
... worker.consume.remote(shard) # doctest: +SKIP
Time complexity: O(1)
Implementation detail: this launches a coordinator actor that is used
to execute the pipeline and push data blocks to each pipeline shard.
Reading from an individual shard will be blocked if other shards are
falling behind. A warning will be printed if a shard has been blocked
on read for more than 10 seconds.
Args:
n: Number of child pipelines to return.
equal: Whether to guarantee each split has an equal
number of records. This may drop records if they cannot be
divided equally among the splits.
locality_hints: A list of Ray actor handles of size ``n``. The
system will try to co-locate the blocks of the ith pipeline
shard with the ith actor to maximize data locality.
Returns:
A list of ``n`` disjoint pipeline splits.
"""
return self._split(
n,
lambda ds, equal=equal: ds.split(
n, equal=equal, locality_hints=locality_hints
),
)
def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]:
"""Split the datasets within the pipeline at the given indices
(like np.split).
This will split each dataset contained within this pipeline, thereby
producing len(indices) + 1 pipelines with the first pipeline containing
the [0, indices[0]) slice from each dataset, the second pipeline
containing the [indices[0], indices[1]) slice from each dataset, and so
        on, with the final pipeline containing the
        [indices[-1], self.count()) slice from each dataset.
Examples:
>>> import ray
>>> p1, p2, p3 = ray.data.range( # doctest: +SKIP
... 8).repeat(2).split_at_indices([2, 5]) # doctest: +SKIP
>>> p1.take() # doctest: +SKIP
[0, 1, 0, 1]
>>> p2.take() # doctest: +SKIP
[2, 3, 4, 2, 3, 4]
>>> p3.take() # doctest: +SKIP
[5, 6, 7, 5, 6, 7]
Time complexity: O(num splits)
See also: ``DatasetPipeline.split``
Args:
indices: List of sorted integers which indicate where the pipeline
will be split. If an index exceeds the length of the pipeline,
an empty pipeline will be returned.
Returns:
The pipeline splits.
"""
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices))
def _split(self, n: int, splitter: Callable[[Dataset], "DatasetPipeline[T]"]):
resources = {}
if not ray.util.client.ray.is_connected():
# Pin the coordinator (and any child actors) to the local node to avoid
# errors during node failures. If the local node dies, then the driver
# will fate-share with the coordinator anyway.
resources["node:{}".format(ray.util.get_node_ip_address())] = 0.0001
ctx = DatasetContext.get_current()
coordinator = PipelineSplitExecutorCoordinator.options(
resources=resources,
scheduling_strategy=ctx.scheduling_strategy,
).remote(self, n, splitter, DatasetContext.get_current())
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
class SplitIterator:
def __init__(self, split_index, coordinator):
self.split_index = split_index
self.coordinator = coordinator
self.warn_threshold = 100
self.wait_delay_s = 0.1
def __iter__(self):
return self
def __next__(self):
ds = None
tries = 0
while ds is None:
ds = ray.get(
self.coordinator.next_dataset_if_ready.remote(self.split_index)
)
# Wait for other shards to catch up reading.
if not ds:
time.sleep(self.wait_delay_s)
tries += 1
if tries > self.warn_threshold:
print(
"Warning: reader on shard {} of the pipeline "
"has been blocked more than {}s waiting for "
"other readers to catch up. All pipeline shards "
"must be read from concurrently.".format(
self.split_index,
self.wait_delay_s * self.warn_threshold,
)
)
self.warn_threshold *= 2
return lambda: ds
return [
# Disable progress bars for the split readers since they would
# overwhelm the console.
DatasetPipeline(
SplitIterator(idx, coordinator),
length=self._length,
progress_bars=False,
)
for idx in range(n)
]
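    # SplitIterator above polls the coordinator actor with a 0.1s backoff and
    # doubles its warning threshold each time it reports a blocked reader, so
    # a shard that stays starved logs progressively less often instead of
    # flooding the console.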
def rewindow(
self, *, blocks_per_window: int, preserve_epoch: bool = True
) -> "DatasetPipeline[T]":
"""Change the windowing (blocks per dataset) of this pipeline.
Changes the windowing of this pipeline to the specified size. For
example, if the current pipeline has two blocks per dataset, and
`.rewindow(blocks_per_window=4)` is requested, adjacent datasets will
be merged until each dataset is 4 blocks. If
        `.rewindow()` is called with a smaller `blocks_per_window`, the
        datasets will instead be split into smaller windows.
Args:
blocks_per_window: The new target blocks per window.
preserve_epoch: Whether to preserve epoch boundaries. If set to
False, then windows can contain data from two adjacent epochs.
"""
class WindowIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
self._buffer: Optional[Dataset[T]] = None
def __next__(self) -> Dataset[T]:
try:
# Merge windows until we meet the requested window size.
if self._buffer is None:
self._buffer = next(self._original_iter)
while self._buffer.num_blocks() < blocks_per_window:
next_ds = next(self._original_iter)
if (
preserve_epoch
and self._buffer._get_epoch() != next_ds._get_epoch()
):
partial_window = self._buffer
self._buffer = next_ds
return lambda: partial_window
else:
self._buffer = self._buffer.union(next_ds)
# Slice off the left-most chunk and return it.
res, self._buffer = self._buffer._divide(blocks_per_window)
assert res.num_blocks() <= blocks_per_window, res
if self._buffer.num_blocks() == 0:
self._buffer = None
return lambda: res
except StopIteration:
# Return the left-over data as a single window.
if self._buffer and self._buffer.num_blocks() > 0:
res = self._buffer
assert res.num_blocks() <= blocks_per_window, res
self._buffer = None
return lambda: res
else:
raise
class WindowIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return WindowIterator(self._original_iter)
if self._length == float("inf"):
length = float("inf")
else:
length = None
return DatasetPipeline(WindowIterable(self.iter_datasets()), length=length)
def repeat(self, times: int = None) -> "DatasetPipeline[T]":
"""Repeat this pipeline a given number or times, or indefinitely.
This operation is only allowed for pipelines of a finite length. An
error will be raised for pipelines of infinite length.
Note that every repeat of the pipeline is considered an "epoch" for
the purposes of ``iter_epochs()``. If there are multiple repeat calls,
the latest repeat takes precedence for the purpose of defining epochs.
Args:
times: The number of times to loop over this pipeline, or None
to repeat indefinitely.
"""
if self._length == float("inf"):
raise ValueError("Cannot repeat a pipeline of infinite length.")
class RepeatIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
# Holds results to repeat.
self._results = []
# Incrementing cursor over results.
self._i = 0
# This is calculated later.
self._max_i = None
def __next__(self) -> Dataset[T]:
# Still going through the original pipeline.
if self._original_iter:
try:
make_ds = next(self._original_iter)
self._results.append(make_ds)
def gen():
res = make_ds()
res._set_epoch(0)
return res
return gen
except StopIteration:
self._original_iter = None
# Calculate the cursor limit.
if times:
self._max_i = len(self._results) * (times - 1)
else:
self._max_i = float("inf")
# Going through a repeat of the pipeline.
if self._i < self._max_i:
make_ds = self._results[self._i % len(self._results)]
epoch = 1 + self._i // len(self._results)
def gen():
res = make_ds()
res._set_epoch(epoch)
return res
self._i += 1
return gen
else:
raise StopIteration
class RepeatIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return RepeatIterator(self._original_iter)
if not times:
length = float("inf")
elif times and self._length:
length = times * self._length
else:
length = None
return DatasetPipeline(
RepeatIterable(iter(self._base_iterable)),
stages=self._stages.copy(),
length=length,
)
def schema(
self, fetch_if_missing: bool = False
) -> Union[type, "pyarrow.lib.Schema"]:
"""Return the schema of the dataset pipeline.
For datasets of Arrow records, this will return the Arrow schema.
For dataset of Python objects, this returns their Python type.
        Note: This is intended as a way to peek at the schema before the
        DatasetPipeline executes. If execution has already started,
it will simply return the cached schema from the previous call.
Time complexity: O(1)
Args:
fetch_if_missing: If True, synchronously fetch the schema if it's
not known. Default is False, where None is returned if the
schema is not known.
Returns:
The Python type or Arrow schema of the records, or None if the
schema is not known.
"""
if not self._executed[0]:
self._schema = self._peek().schema(fetch_if_missing)
return self._schema
def count(self) -> int:
"""Count the number of records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The number of records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot count a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [len(batch)])
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def sum(self) -> int:
"""Sum the records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The sum of the records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot sum a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [batch.sum()[0]], batch_format="pandas")
total = 0
for elem in pipe.iter_rows():
total += elem
return total
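    # Note: count() and sum() drive the whole pipeline to completion, and a
    # DatasetPipeline can only be read once (iter_datasets() raises if the
    # pipeline was already executed), so they cannot be combined with other
    # output methods on the same pipeline object.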
def show_windows(self, limit_per_dataset: int = 10) -> None:
"""Print up to the given number of records from each window/dataset.
This is helpful as a debugging tool for understanding the structure of
dataset pipelines.
Args:
limit_per_dataset: Rows to print per window/dataset.
"""
epoch = None
for i, ds in enumerate(self.iter_datasets()):
if ds._get_epoch() != epoch:
epoch = ds._get_epoch()
print("------ Epoch {} ------".format(epoch))
print("=== Window {} ===".format(i))
ds.show(limit_per_dataset)
def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline[T]"]:
"""Split this pipeline up by epoch.
This allows reading of data per-epoch for repeated Datasets, which is
useful for ML training. For example, ``ray.data.range(10).repeat(50)``
generates a pipeline with 500 rows total split across 50 epochs. This
method allows iterating over the data individually per epoch
(repetition) of the original data.
Args:
max_epoch: If greater than zero, stop after the given number of epochs.
Examples:
>>> import ray
>>> epochs = ray.data.range(10).repeat(50).iter_epochs() # doctest: +SKIP
>>> for i, epoch in enumerate(epochs): # doctest: +SKIP
... print("Epoch", i) # doctest: +SKIP
... for row in epoch.iter_rows(): # doctest: +SKIP
... print(row) # doctest: +SKIP
Returns:
Iterator over epoch objects, where each epoch is a DatasetPipeline
containing data from that epoch only.
"""
class Peekable:
def __init__(self, base_iter: Iterator[T]):
self._iter = base_iter
self._buffer = None
def _fill_buffer_if_possible(self):
if self._buffer is None:
try:
self._buffer = next(self._iter)
assert self._buffer is not None
except StopIteration:
pass
def peek(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
return self._buffer
def __next__(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
item = self._buffer
self._buffer = None
return item
class SingleEpochIterator:
def __init__(self, peekable_iter: Iterator[Dataset[T]], epoch: int):
self._iter = peekable_iter
self._epoch = epoch
def __next__(self) -> Dataset[T]:
if self._iter.peek()._get_epoch() > self._epoch:
raise StopIteration
ds = next(self._iter)
return lambda: ds
def __iter__(self):
return self
class EpochDelimitedIterator:
def __init__(self, pipe, max_epoch):
self._iter = Peekable(pipe.iter_datasets())
self._cur_epoch = None
self._max_epoch = max_epoch
def __next__(self) -> "DatasetPipeline[T]":
if self._cur_epoch is None:
self._cur_epoch = self._iter.peek()._get_epoch()
else:
self._cur_epoch += 1
if self._max_epoch > 0 and self._cur_epoch >= self._max_epoch:
raise StopIteration
warned = False
while self._iter.peek()._get_epoch() < self._cur_epoch:
if not warned:
warned = True
logger.warning(
"Data from epoch {} was not fully read, "
"skipping to next epoch.".format(self._cur_epoch - 1)
)
next(self._iter)
epoch_pipe = DatasetPipeline.from_iterable(
SingleEpochIterator(self._iter, epoch=self._cur_epoch)
)
return epoch_pipe
def __iter__(self):
return self
return EpochDelimitedIterator(self, max_epoch)
@DeveloperAPI
def iter_datasets(self) -> Iterator[Dataset[T]]:
"""Iterate over the output datasets of this pipeline.
Returns:
Iterator over the datasets outputted from this pipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
if self._first_dataset is None:
self._peek()
ds_iter = itertools.chain([self._first_dataset], self._dataset_iter)
self._first_dataset = None
self._dataset_iter = None
return ds_iter
@DeveloperAPI
def foreach_window(
self, fn: Callable[[Dataset[T]], Dataset[U]]
) -> "DatasetPipeline[U]":
"""Apply a transform to each dataset/window in this pipeline.
Args:
fn: The function to transform each dataset with.
Returns:
The transformed DatasetPipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
return DatasetPipeline(
self._base_iterable,
self._stages + [fn],
self._length,
self._progress_bars,
_executed=self._executed,
)
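# Hedged usage sketch for foreach_window (assumption: ``ray.data.range`` and
# ``Dataset.window`` behave as in the docstring examples elsewhere in this module):
#
#     pipe = ray.data.range(1000).window(blocks_per_window=10)
#     doubled = pipe.foreach_window(lambda ds: ds.map(lambda x: x * 2))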
def stats(self, exclude_first_window: bool = True) -> str:
"""Returns a string containing execution timing information.
Args:
exclude_first_window: Whether to exclude the first window from
the pipeline time breakdown. This is generally a good idea
since there is always a stall waiting for the first window to
be initially computed, which can be misleading in the stats.
"""
return self._stats.summary_string(exclude_first_window)
@staticmethod
def from_iterable(
iterable: Iterable[Callable[[], Dataset[T]]],
) -> "DatasetPipeline[T]":
"""Create a pipeline from an sequence of Dataset producing functions.
Args:
iterable: A finite or infinite-length sequence of functions that
each produce a Dataset when called.
"""
if hasattr(iterable, "__len__"):
length = len(iterable)
else:
length = None
return DatasetPipeline(iterable, length=length)
def __repr__(self) -> str:
return "DatasetPipeline(num_windows={}, num_stages={})".format(
self._length, 1 + len(self._stages)
)
def __str__(self) -> str:
return repr(self)
def _get_uuid(self) -> str:
return self._uuid
def _set_uuid(self, uuid: str) -> None:
self._uuid = uuid
def _optimize_stages(self):
"""Optimize this pipeline, fusing stages together as possible."""
context = DatasetContext.get_current()
if not context.optimize_fuse_stages:
self._optimized_stages = self._stages
return
# This dummy dataset will be used to get a set of optimized stages.
dummy_ds = Dataset(
ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),
0,
True,
)
# Apply all pipeline operations to the dummy dataset.
for stage in self._stages:
dummy_ds = stage(dummy_ds)
# Get the optimized stages.
_, _, stages = dummy_ds._plan._optimize()
# Apply these optimized stages to the datasets underlying the pipeline.
# These optimized stages will be executed by the PipelineExecutor.
optimized_stages = []
for stage in stages:
optimized_stages.append(
lambda ds, stage=stage: Dataset(
ds._plan.with_stage(stage), ds._epoch, True
)
)
self._optimized_stages = optimized_stages
def _peek(self) -> Dataset[T]:
if self._first_dataset is None:
self._optimize_stages()
self._dataset_iter = PipelineExecutor(self)
self._first_dataset = next(self._dataset_iter)
return self._first_dataset
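# A minimal, hedged usage sketch (assumptions: ``ray`` is importable here and
# ``ray.data.range`` behaves as in the docstring examples above). Wrapped in a helper
# so nothing runs at import time.
def _example_pipeline_from_iterable():
    import ray
    # Each element is a zero-argument callable that lazily builds one window.
    makers = [lambda n=n: ray.data.range(n) for n in (10, 20, 30)]
    pipe = DatasetPipeline.from_iterable(makers)
    # count() consumes the pipeline; the expected total here is 10 + 20 + 30 = 60.
    return pipe.count()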
for method in _PER_DATASET_OPS:
def _make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
setattr(DatasetPipeline, method, _make_impl(method))
for method in _HOLISTIC_PER_DATASET_OPS:
def _make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
def _deprecation_warning(method: str):
def impl(*a, **kw):
raise DeprecationWarning(
"`{}` has been renamed to `{}_each_window`.".format(method, method)
)
return impl
setattr(DatasetPipeline, method, _deprecation_warning(method))
setattr(DatasetPipeline, method + "_each_window", _make_impl(method))
for method in _PER_DATASET_OUTPUT_OPS:
def _make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
uuid = None
for i, ds in enumerate(self.iter_datasets()):
if uuid is None:
uuid = self._get_uuid() or ds._get_uuid()
ds._set_uuid(f"{uuid}_{i:06}")
getattr(ds, method)(*args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` on each output dataset of this pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, _make_impl(method))
for method in _OUTPUT_ITER_OPS:
def _make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
return delegate(self, *args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` over the stream of output batches from the pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, _make_impl(method))
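# Hedged note on the loops above (illustration only): each generated method simply
# forwards to the corresponding ``Dataset`` method. For a per-window op such as
# ``map_batches`` (assuming it is registered in ``_PER_DATASET_OPS``, defined earlier
# in this file), the attached method behaves like:
#
#     def map_batches(self, *args, **kwargs):
#         return self.foreach_window(lambda ds: ds.map_batches(*args, **kwargs))
#
# while the output ops iterate ``iter_datasets()`` and call the underlying ``Dataset``
# method on each window, tagging every window with a per-window UUID suffix.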
|
py | b40fad1cfb4518d285d07044f76fd4331981c3bd | from imageio import imread
# import numpy as np
from PoisDenoiser.projection import Projection as proj, is_in_C as cond
from PoisDenoiser.utils import psnr
import torch as th
from tqdm import tqdm
'''
This module contains all the necessary building blocks for the PGM algorithm,
including TV regularizers with their gradients.
'''
def get_deltas(img):
'''
Returns discrete derivatives of the image "img" by columns and rows, respectively.
If img.shape == (B,C,H,W) then returned delta_x.shape == delta_y.shape == (B,C,H,W).
For delta_x the last column will always be 0, as will delta_y's last row.
'''
assert img.dim() == 4 # unsqueeze the dimension if necessary.
img = img.type(th.FloatTensor)
img_pad_x = th.nn.functional.pad(img, pad=(0,1, 0,0), mode='replicate')
img_pad_y = th.nn.functional.pad(img, pad=(0,0, 0,1), mode='replicate')
delta_x = img_pad_x[:,:,:,1:] - img_pad_x[:,:,:,:-1]
delta_y = img_pad_y[:,:,1:,:] - img_pad_y[:,:,:-1,:]
return delta_x, delta_y
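# Hedged shape-contract sketch (illustrative helper, not part of the original module):
# both finite-difference maps keep the (B, C, H, W) shape of the input, and the
# replicate padding zeroes the last column of delta_x and the last row of delta_y.
def _example_get_deltas_shapes():
    img = th.rand(2, 3, 4, 5)
    dx, dy = get_deltas(img)
    assert dx.shape == dy.shape == img.shape
    assert bool(th.all(dx[:, :, :, -1] == 0)) and bool(th.all(dy[:, :, -1, :] == 0))
    return dx, dy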
def reg_TV1_1(img=None, deltas=None):
if deltas is None and img is None:
raise Exception('Either "img" or "deltas" must be provided.')
if img is not None:
deltas = get_deltas(img)
# Now "deltas" are defined.
# Let's calculate the TV regularizer...
return th.sum( th.abs(deltas[0]) + th.abs(deltas[1]) )
def reg_TV1_2(img=None, deltas=None):
if deltas is None and img is None:
raise Exception('Either "img" or "deltas" must be provided.')
if img is not None:
deltas = get_deltas(img)
# Now "deltas" are defined.
# Let's calculate the TV regularizer...
return th.sum( th.norm(deltas[0]) + th.norm(deltas[1]) )
def reg_TV1_1_grad(img=None, deltas=None):
if deltas is None and img is None:
raise Exception('Either "img" or "deltas" must be provided.')
if img is not None:
deltas = get_deltas(img)
# Now "deltas" are defined.
# Let's calculate the gradient of the TV regularizer...
sign_x = th.sign(deltas[0])
sign_y = th.sign(deltas[1])
sign_x_pad = th.nn.functional.pad(sign_x, (1,0, 0,0), mode='constant')
sign_y_pad = th.nn.functional.pad(sign_y, (0,0, 1,0), mode='constant')
grad = - sign_y_pad[:,:,1:,:] - sign_x_pad[:,:,:,1:] + sign_y_pad[:,:,:-1,:] + sign_x_pad[:,:,:,:-1]
return grad
def reg_TV1_2_grad(img=None, deltas=None):
if deltas is None and img is None:
raise Exception('Either "img" or "deltas" must be provided.')
if img is not None:
delta_x, delta_y = get_deltas(img)
# Now "deltas" are defined.
# Let's calculate the gradient of the TV regularizer...
delta_x_pad = th.nn.functional.pad(delta_x, (1,0, 0,0), mode='constant')
delta_y_pad = th.nn.functional.pad(delta_y, (0,0, 1,0), mode='constant')
grad = ( delta_y_pad[:,:,:-1,:] - delta_y_pad[:,:,1:,:] ) / th.norm(delta_y) + \
( delta_x_pad[:,:,:,:-1] - delta_x_pad[:,:,:,1:] ) / th.norm(delta_x)
return grad
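# Hedged cross-check sketch (illustrative helper, not part of the original module):
# the analytic gradient above can be compared against autograd applied to reg_TV1_2.
# Returns True when the two gradients agree within the given tolerance.
def _example_check_tv12_grad(shape=(1, 1, 8, 8), atol=1e-4):
    img = th.rand(*shape)
    img_auto = img.clone().requires_grad_(True)
    reg_TV1_2(img=img_auto).backward()
    return bool(th.allclose(img_auto.grad, reg_TV1_2_grad(img=img), atol=atol))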
def get_lr(epoch, lr0=1, k=0.1):
# Let's make it exponentially decreasing...
arg = th.FloatTensor([-k*epoch])
lr = th.FloatTensor([lr0]) * th.exp(arg)
return lr
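# Hedged illustration of the schedule above: lr decays as lr0 * exp(-k * epoch), so
# with lr0=1 and k=0.1, get_lr(0) -> 1.0 and get_lr(10) -> exp(-1) ~ 0.37.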
def PGM_step(img, noisy, epoch, reg, lr0_k=(1,0.01), \
fixed=True, num_iters=None):
lr0, k = lr0_k[0], lr0_k[1]
lr = get_lr(epoch, lr0=lr0, k=k)
img_new = img - lr * reg(img=img)  # gradient step; "reg" must be the regularizer's gradient (e.g. reg_TV1_1_grad)
lower_bound = 1e-8
img_new = th.clamp(img_new, lower_bound, img_new.max())
if fixed:
img_new = proj(img_new, noisy, fixed=fixed, num_iters=num_iters) # projection on Poissonian set
else:
img_new = proj(img_new, noisy, eps=1e-2, fixed=fixed)
return img_new
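# Hedged end-to-end sketch (assumptions: ``noisy`` is a (1, 1, H, W) float tensor of
# Poisson-corrupted counts, and ``do_denoise`` is defined just below). Note that the
# ``reg`` argument must be the *gradient* of the regularizer:
#
#     denoised = do_denoise(noisy, reg=reg_TV1_1_grad, num_epochs=50,
#                           lr0_k=(1, 0.1), fixed=True, num_iters=10)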
def do_denoise(noisy_img, reg, num_epochs=100, lr0_k=(1,0.1), \
fixed=True, num_iters=10, out_psnrs=False, ref_image=None):
img_estim = noisy_img.clone()
psnrs = []
if out_psnrs and ref_image is not None:
    psnrs = [psnr(ref_image, img_estim)]
for epoch in tqdm(range(num_epochs)):
if fixed:
img_estim = PGM_step(img_estim, noisy_img, epoch, reg=reg, \
lr0_k=lr0_k, fixed=fixed, num_iters=num_iters)
else:
img_estim = PGM_step(img_estim, noisy_img, epoch, reg=reg, \
lr0_k=lr0_k, fixed=fixed)
if out_psnrs and ref_image is not None:
psnrs.append(psnr(ref_image, img_estim))
return (img_estim, psnrs) if out_psnrs else img_estim |
py | b40fad332bfa418944f3c87a97f93c7e776df88b | # orm/relationships.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from __future__ import absolute_import
import collections
import weakref
from . import attributes
from . import dependency
from . import mapper as mapperlib
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import operators
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"remote": True}
)
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(
expression._clause_element_as_expr(expr), {"foreign": True}
)
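# Hedged usage sketch (assumption: ``User``/``Address`` are illustrative mapped
# classes, not defined in this module). When no ForeignKey is declared, a custom
# primaryjoin can mark the foreign side explicitly:
#
#     class User(Base):
#         __tablename__ = 'user'
#         id = Column(Integer, primary_key=True)
#         addresses = relationship(
#             "Address",
#             primaryjoin="foreign(Address.user_id) == User.id",
#         )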
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties", add_to_all=True)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = "relationship"
_dependency_processor = None
@util.deprecated_params(
extension=(
"0.7",
":class:`.AttributeExtension` is deprecated in favor of the "
":class:`.AttributeEvents` listener interface. The "
":paramref:`.relationship.extension` parameter will be "
"removed in a future release.",
)
)
def __init__(
self,
argument,
secondary=None,
primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False,
extension=None,
viewonly=False,
lazy="select",
collection_class=None,
passive_deletes=False,
passive_updates=True,
remote_side=None,
enable_typechecks=True,
join_depth=None,
comparator_factory=None,
single_parent=False,
innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None,
omit_join=None,
):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need for related classes to be imported into the local module space
before the dependent classes have been declared. It is still required
that the modules in which these related classes appear are imported
anywhere in the application at some point before the related mappings
are actually used, else a lookup error will be raised when the
:func:`.relationship` attempts to resolve the string reference to the
related class. An example of a string-resolved class is as
follows::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing
the target of the relationship.
:paramref:`~.relationship.argument` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`.Table`.
In less common circumstances, the argument may also be specified
as an :class:`.Alias` construct, or even a :class:`.Join` construct.
:paramref:`~.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`.Table` that is
present in the :class:`.MetaData` collection associated with the
parent-mapped :class:`.Table`.
The :paramref:`~.relationship.secondary` keyword argument is
typically applied in the case where the intermediary :class:`.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`~.relationship.viewonly` flag so that this
:func:`.relationship` is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`~.relationship.secondary` when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works
more effectively when referring to a :class:`.Join` instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`.relationship`
configuration when using :paramref:`~.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`~.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`~.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.backref` - alternative form
of backref specification.
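A hedged illustration of two explicitly paired relationships (``Parent`` and
``Child`` are example classes, as in the examples above)::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", back_populates="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship("Parent", back_populates="children")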
:param bake_queries=True:
Use the :class:`.BakedQuery` cache to cache the construction of SQL
used in lazy loads. True by default. Set to False if the
join condition of the relationship has unusual features that
might not respond well to statement caching.
.. versionchanged:: 1.2
"Baked" loading is the default implementation for the "select",
a.k.a. "lazy" loading strategy for relationships.
.. versionadded:: 1.0.0
.. seealso::
:ref:`baked_toplevel`
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`~.relationship.cascade_backrefs` option is used.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT is
reducing performance of the innermost subquery beyond that of what
duplicate innermost rows may be causing.
.. versionchanged:: 0.9.0 -
:paramref:`~.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
:param foreign_keys:
a list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`.relationship`
object's :paramref:`~.relationship.primaryjoin` condition.
That is, if the :paramref:`~.relationship.primaryjoin`
condition of this :func:`.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`.relationship` is ``b.a_id``.
In normal cases, the :paramref:`~.relationship.foreign_keys`
parameter is **not required.** :func:`.relationship` will
automatically determine which columns in the
:paramref:`~.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`.Column` objects that specify :class:`.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`.ForeignKeyConstraint` construct.
:paramref:`~.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`.relationship` to consider just those columns specified
here as "foreign".
2. The :class:`.Table` being mapped does not actually have
:class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`~.relationship.primaryjoin` argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`~.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`.relationship` doesn't raise any exceptions, the
:paramref:`~.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`~.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`~.relationship.primaryjoin` condition.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have one or at least one entry.
The option supports the same "nested" and "unnested" options as
that of :paramref:`.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`~.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``selectin`` - items should be loaded "eagerly" as the parents
are loaded, using one or more additional SQL statements, which
issues a JOIN to the immediate parent object, specifying primary
key identifiers using an IN clause.
.. versionadded:: 1.2
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on
relationship loader configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`~.relationship.load_on_pending` flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
indicates the ordering that should be applied when loading these
items. :paramref:`~.relationship.order_by` is expected to refer to
one of the :class:`.Column` objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`~.relationship.order_by` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy :func:`.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it will also issue an UPDATE after the
instance is UPDATEd, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`~.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
a SQL expression that will be used as the primary
join of the child object against the parent object, or in a
many-to-many relationship the join of the parent object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`~.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`~.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`~.relationship.remote_side`, typically
when a custom :paramref:`~.relationship.primaryjoin` condition
is used.
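A hedged adjacency-list sketch (``Node`` is an example class, not part of this
module); ``remote_side=[id]`` marks the parent side of the self-referential
join, producing a many-to-one ``parent`` reference::

    class Node(Base):
        __tablename__ = 'node'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('node.id'))
        parent = relationship("Node", remote_side=[id])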
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`~.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`.relationship` construct itself will raise an error
instructing when this option is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`~.relationship.single_parent` flag may be appropriate.
:param uselist:
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`~.relationship.uselist` to
False.
The :paramref:`~.relationship.uselist` flag is also available on an
existing :func:`.relationship` construct as a read-only attribute,
which can be used to determine if this :func:`.relationship` deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`~.relationship.uselist` flag is needed.
:param viewonly=False:
when set to True, the relationship is used only for loading objects,
and not for any persistence operation. A :func:`.relationship`
which specifies :paramref:`~.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`~.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`~.sql.expression.cast`. The
:paramref:`~.relationship.viewonly` flag is also of general use when
defining any kind of :func:`~.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
:param omit_join:
Allows manual control over the "selectin" automatic join
optimization. Set to ``False`` to disable the "omit join" feature
added in SQLAlchemy 1.3.
.. versionadded:: 1.3
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property = set()
self.cascade = (
cascade if cascade is not False else "save-update, merge"
)
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
self.property,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
@util.memoized_property
def entity(self):
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.entity
@util.memoized_property
def mapper(self):
"""The target :class:`.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type_mapper = inspect(self._of_type).mapper
else:
of_type_mapper = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_mapper=of_type_mapper,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls,
)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, "_of_type", None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it is the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
criterion is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(criterion)
if secondary is not None:
ex = sql.exists(
[1], crit, from_obj=[dest, secondary]
).correlate_except(dest, secondary)
else:
ex = sql.exists([1], crit, from_obj=dest).correlate_except(
dest
)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.property._optimized_compare(
other, adapt_source=self.adapter
)
if self.property.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
dict_ = state.dict
return sql.bindparam(
x,
unique=True,
callable_=self.property._get_attr_w_warn_on_none(
self.property.mapper, state, dict_, col
),
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def _with_parent(self, instance, alias_secondary=True, from_entity=None):
assert instance is not None
adapt_source = None
if from_entity is not None:
insp = inspect(from_entity)
if insp.is_aliased_class:
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state,
value_is_parent=False,
adapt_source=None,
alias_secondary=True,
):
if state is not None:
state = attributes.instance_state(state)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(self.secondary.alias()).traverse(
criterion
)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't do
# the comparison correctly for that so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
def _go():
last_known = to_return = state._last_known_values[prop.key]
existing_is_available = last_known is not attributes.NO_VALUE
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=attributes.PASSIVE_OFF
if state.persistent
else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
)
if current_value is attributes.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is attributes.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).get(
source_state, source_dict
)
if hasattr(instances, "_sa_adapter"):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
                # also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list, _adapt=False
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self, state, dict_, key, passive=attributes.PASSIVE_OFF
):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, "get_collection"):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
@util.memoized_property
def entity(self): # type: () -> Union[AliasedInsp, Mapper]
"""Return the target mapped entity, which is an inspect() of the
        class or aliased class that is referred towards.
"""
if util.callable(self.argument) and not isinstance(
self.argument, (type, mapperlib.Mapper)
):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
return mapperlib.class_mapper(argument, configure=False)
try:
entity = inspect(argument)
except sa_exc.NoInspectionAvailable:
pass
else:
if hasattr(entity, "mapper"):
return entity
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument))
)
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
return self.entity.mapper
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
val = getattr(self, attr)
if val is not None:
setattr(
self,
attr,
_orm_deannotate(
expression._only_column_elements(val, attr)
),
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in util.to_list(self.order_by)
]
self._user_defined_foreign_keys = util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(self._user_defined_foreign_keys)
)
self.remote_side = util.column_set(
expression._only_column_elements(x, "remote_side")
for x in util.to_column_set(self.remote_side)
)
self.target = self.entity.persist_selectable
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
def _get_cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
cascade = property(_get_cascade, _set_cascade)
def _check_cascade_settings(self, cascade):
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"On %s, delete-orphan cascade is not supported "
"on a many-to-many or many-to-one relationship "
"when single_parent is not set. Set "
"single_parent=True on the relationship()." % self
)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn(
"On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only." % self
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
        mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
for c in cols:
if (
self.secondary is not None
and self.secondary.c.contains_column(c)
):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = (
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic=False,
source_selectable=None,
dest_polymorphic=False,
dest_selectable=None,
of_type_mapper=None,
):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
aliased = False
if dest_selectable is None:
dest_selectable = self.entity.selectable
if dest_polymorphic and self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
else:
aliased = True
dest_mapper = of_type_mapper or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (source_selectable is not None)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
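# Module-level helper: clone the given expression, applying a copy of the
# supplied annotations dict to every ColumnClause encountered (including
# those nested inside the expression's internals).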
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
class JoinCondition(object):
def __init__(
self,
parent_persist_selectable,
child_persist_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self):
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
We'd want to remove "parentmapper" also, but apparently there's
an exotic use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity",)
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity",)
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
except sa_exc.NoForeignKeysError:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
)
else:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
)
except sa_exc.AmbiguousForeignKeysError:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables." % (self.prop, self.secondary)
)
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table." % self.prop
)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
        tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _annotate_parentmapper(self):
if self.prop is None:
return
def parentmappers_(elem):
if "remote" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.mapper})
elif "local" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.parent})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set(
[
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
]
)
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self):
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
            # considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if len(to_.foreign_keys) < 2:
continue
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
pr.mapper in mapperlib._mapper_registry
and (
self.prop._persists_for(pr.parent)
or pr._persists_for(self.prop.parent)
)
and fr_ is not from_
and pr not in self.prop._reverse_property
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"Consider applying "
"viewonly=True to read-only relationships, or provide "
"a primaryjoin condition marking writable columns "
"with the foreign() annotation."
% (
self.prop,
from_,
to_,
", ".join(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
),
)
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set(
[
col
for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
]
)
def join_targets(
self, source_selectable, dest_selectable, aliased, single_crit=None
):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(secondary).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
def create_lazy_clause(self, reverse_direction=False):
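        # Build the "lazy load" criterion for this join condition: columns on
        # the local side (or the remote side when reverse_direction is set)
        # are swapped for anonymous bind parameters.  Returns a tuple of
        # (lazywhere, bind_to_col, equated_columns): the criterion itself, a
        # mapping of bind parameter key -> original column, and a mapping of
        # each column to the column it is equated with across the join.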
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and "local" in col._annotations)
or reverse_direction
and (
(has_secondary and col in lookup)
or (not has_secondary and "remote" in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True
)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
|
py | b40fae474af9ba0778febf03376f6dab5256d05f | # -*- coding: utf-8 -*-
"""
//////////////////////////////////////////////////////////////////////////////////////////
// Original author: Aritz Lizoain
// Github: https://github.com/aritzLizoain
// My personal website: https://aritzlizoain.github.io/
// Description: CNN Image Segmentation
// Copyright 2020, Aritz Lizoain.
// License: MIT License
//////////////////////////////////////////////////////////////////////////////////////////
- load_images (unused)
- get_weights: calculates the weights for the loss function
- process_fits: loads FITS files and creates small sections
- images_small2big: reconstructs small sections
- check_one_object: looks for the chosen category section by section
"""
import os
import sys
import numpy as np
import cv2
from skimage.transform import resize
##############################################################
# NOT USED IN VERSION 2.0.
# THE IMAGES ARE NOW SAVED AND LOADED AS ARRAYS, NOT AS PNG FILES
# def load_images(TRAIN_PATH='', TEST_PATH='',\
# TEST_PREDICTIONS_PATH='',IMG_WIDTH = \
# 256, IMG_HEIGHT = 256):
# train_ids = next(os.walk(TRAIN_PATH))[2]
# test_ids = next(os.walk(TEST_PATH))[2]
# # Get and resize train images and masks
# images = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH,3)\
# , dtype=np.uint8)
# test_images = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH\
# , 3), dtype=np.uint8)
# sys.stdout.flush()
# # # train images
# for n,id_ in enumerate(train_ids):
# img = cv2.imread(TRAIN_PATH + id_)
# img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant'\
# , preserve_range=True)
# images[n] = img
# # # test images
# for n,id_ in enumerate(test_ids):
# mask_ = cv2.imread(TEST_PATH + id_)
# mask_ = resize(mask_, (IMG_HEIGHT, IMG_WIDTH),\
# preserve_range=True, mode='constant')
# test_images[n] = mask_
# print('Dataset correctly loaded')
# return images, test_images
#-------------------------------------------------------------
def get_weights(images,test_images):
from mask import get_percentages
#all_images = np.concatenate((images, test_images)) to take
#both training and test images
all_images=images #to take only training images
unique_elements, percentage = get_percentages(all_images)
inverse_percentages=1/percentage #the weights are inversely
#proportional to their frequency
weights = inverse_percentages/sum(inverse_percentages)*\
len(unique_elements) #normalize to the number of classes
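    # Worked example (illustrative numbers): if get_percentages reports three
    # classes covering 90%, 9% and 1% of the pixels, the inverse frequencies
    # are proportional to [1.11, 11.11, 100]; normalized to the 3 classes the
    # weights come out to roughly [0.03, 0.30, 2.67], so the rarest class
    # carries the largest weight in the loss function.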
return weights
#-------------------------------------------------------------
def process_fits(name='name.fits', size=256, normalized='yes'\
, normalization_value=255):
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
import numpy as np
#LOADING THE IMAGE AND GETTING INFORMATION
image_file = get_pkg_data_filename(name)
image_data = fits.getdata(image_file, ext=0)
# image_data=image_data/100
# normalize
if normalized=='yes':
maximum_value=np.amax(image_data)
image_data_normalized=image_data/maximum_value*\
normalization_value
elif normalized=='no':
# image_data=image_data
None
else:
        print(' ERROR: The given input for the normalization '
              'variable is not an option. Please choose yes/no')
#information about the original full image
image_length=image_data.shape[1]
image_height=image_data.shape[0]
amount_images_wide=int((image_length/2)/size) #we will only
#take half of the image
amount_images_high=int(image_height/size)
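    # Example of the arithmetic above (illustrative numbers): a 2000x4000 FITS
    # image with size=256 gives amount_images_high = int(2000/256) = 7 and
    # amount_images_wide = int((4000/2)/256) = 7, i.e. 49 sections taken from
    # the right-hand end of the image (see starting_value below).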
# # RESIZE image UNUSED
# if image_length/size-amount_images_wide < 0.5:
# amount_images_wide=amount_images_wide
# else:
# amount_images_wide=amount_images_wide + 1
# if image_height/size-amount_images_high < 0.5:
# amount_images_high=amount_images_high
# else:
# amount_images_high=amount_images_high + 1
# number_of_images=amount_images_wide*amount_images_high
# if normalized=='yes':
# image_data_normalized_resized=np.resize(image_data_normalized, (size*amount_images_high, size*amount_images_wide))
# print(' Resized and normalized real test image shape: {0}'.format(image_data_normalized_resized.shape))
# plt.figure()
# plt.imshow(image_data_normalized_resized)
# plt.colorbar()
# plt.title('Normalized and resized real test image', fontsize=15)
# plt.show()
# image_data_use = image_data_normalized_resized
# elif normalized=='no':
# image_data_resized=np.resize(image_data, (size*amount_images_high, size*amount_images_wide))
# print(' Resized real test image shape: {0}'.format(image_data_resized.shape))
# plt.figure()
# plt.imshow(image_data_resized)
# plt.colorbar()
# plt.title('Resized real test image', fontsize=25)
# plt.show()
# image_data_use = image_data_resized
#CUT
number_of_images = amount_images_wide*amount_images_high
image_data_use=np.zeros((amount_images_high*size,amount_images_wide*size))
starting_value=image_data.shape[1]-image_data_use.shape[1]
if normalized=='yes':
for i in range(0,image_data_use.shape[0]):
for j in range (0,image_data_use.shape[1]):
image_data_use[i,j] = image_data_normalized[i,j + starting_value]
print(' Cut and normalized real test image shape: {0}'.format(image_data_use.shape))
plt.figure()
plt.grid(False)
plt.imshow(image_data_use)
plt.colorbar()
plt.title('Normalized and cut real test image', fontsize=15)
plt.show()
elif normalized=='no':
for i in range(0,image_data_use.shape[0]):
for j in range (0,image_data_use.shape[1]):
image_data_use[i,j] = image_data[i,j + starting_value]
plt.figure()
plt.grid(False)
plt.imshow(image_data_use)
plt.colorbar()
plt.title('Cut real test image', fontsize=20)
plt.show()
print(' Cut real test image shape: {0}'.format(image_data_use.shape))
# Create the smaller sections
print(' Creating {1} sections of size {0}X{0}...'.format(size, number_of_images))
images_small=np.zeros((number_of_images,size,size))
# print(' Images small shape: {0}'.format(images_small.shape))
for i in range(0, amount_images_wide):
for j in range(0, amount_images_high):
for x in range(0, size):
for y in range (0, size):
images_small[i+j*(amount_images_wide),y,x]=image_data_use[y+j*size,x+i*size]
print(' Real test images correctly created')
details=np.array([size, amount_images_high, amount_images_wide], dtype=int)
return image_data_use, images_small, details
#----------------------------------------------------------------------------
# from mask input of (n_sections, size, size, 4) gives mask output of (size, size, 4)
def images_small2big(images, details):
# Create the big image from small sections
size = details[0]
amount_images_high = details[1]
amount_images_wide = details[2]
dimensions = images.shape[3]
full_image_empty = np.zeros((size*amount_images_high, size*amount_images_wide, dimensions))
print(' Creating the real predicted test image from the {0} sections...'.format(len(images)))
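    # Section k = i + j*amount_images_wide is pasted back into columns
    # [i*size, (i+1)*size) and rows [j*size, (j+1)*size) of the full image,
    # mirroring the cutting order used in process_fits above.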
for i in range(0, amount_images_wide):
for j in range(0, amount_images_high):
for x in range(0, size):
for y in range (0, size):
full_image_empty[y+j*size,x+i*size] = images[i+j*(amount_images_wide),y,x]
print(' Real test image prediction correctly created')
return full_image_empty
#----------------------------------------------------------------------------
# CHECK THE ONES WITH A SPECIFIC OBJECT IN SMALL SECTIONS
def check_one_object(test_outputs_real, test_images_real, object_to_find='Cluster', real_percentages=[0,0,0,0], details=[0,0,0]):
from mask import get_max_in_mask, mask_to_label
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if object_to_find=='Background':
object_number = 0
elif object_to_find=='Glowing':
object_number = 1
elif object_to_find=='Hot pixel':
object_number = 2
elif object_to_find=='Cluster':
object_number = 3
else:
        print(' ERROR: The given input for the object to find variable is not an option.'
              ' Please choose background/glowing/hot pixel/cluster')
#Legend 1
red_patch = mpatches.Patch(color=[1, 0.2, 0.2], label='Cluster')
blue_patch = mpatches.Patch(color=[0,0.5,1.], label='Hot pixel')
green_patch = mpatches.Patch(color=[0.35,1.,0.25], label='Glowing')
black_patch = mpatches.Patch(color=[0./255, 0./255, 0./255], label='Background')
counter = 0
for i in range (len(test_outputs_real)):
check=test_outputs_real[i]
check=check[np.newaxis, ...]
check=get_max_in_mask(check)
is_there=object_number in check
#in order to know the position of each section
ychange = int(i/details[2])*details[0] #y axis position
xchange = (i-int(i/details[2])*details[2])*details[0] #x axis position
if is_there == True:
from mask import output_to_label_one_object
label_with_one_object = output_to_label_one_object(check, object_number)
label_all_objects = mask_to_label(check, to_print='no')
fig, ax = plt.subplots(1, 3, figsize=(20, 10))
# plt.setp(ax, xticklabels=pixels, yticklabels=pixels)
ax[0].grid(False)
ax0 = ax[0].imshow(np.squeeze(test_images_real[i]))
ax[0].set_title('Section {0}'.format(i+1), fontsize=25);
ax[0].set_xlabel('pixels', fontsize=16)
ax[0].set_ylabel('pixels', fontsize=16)
ax[0].set_xticks([0,50,100,150,200,250])
ax[0].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
ax[0].set_yticks([0,50,100,150,200,250])
ax[0].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
cax = fig.add_axes([0.12, 0.16, 0.25, 0.03])
plt.colorbar(ax0, orientation="horizontal", cax=cax)
ax[1].grid(False)
ax[1].imshow(label_all_objects[0])
ax[1].set_title('Predicted label', fontsize=25);
ax[1].set_xlabel('pixels', fontsize=16)
ax[1].set_ylabel('pixels', fontsize=16)
ax[1].set_xticks([0,50,100,150,200,250])
ax[1].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
ax[1].set_yticks([0,50,100,150,200,250])
ax[1].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
ax[2].grid(False)
ax[2].imshow(label_with_one_object[0])
ax[2].set_title('Finding {0}'.format(object_to_find), fontsize=25);
ax[2].set_xlabel('pixels', fontsize=16)
ax[2].set_ylabel('pixels', fontsize=16)
ax[2].set_xticks([0,50,100,150,200,250])
ax[2].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
ax[2].set_yticks([0,50,100,150,200,250])
ax[2].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
plt.legend(loc='upper center', bbox_to_anchor=(2.1, 1.5), fontsize=16,\
handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)
plt.show() #the image is not being saved
counter=counter + 1
# print(' {1} found in section {0}'.format(i, object_to_find))
else:
counter=counter
print(' {1} found in {0} sections'.format(counter, object_to_find))
return None
|
py | b40fae5d700d4d370455fa9aa90ed791ddab379b | '''
django-cleanup sends the following signals
'''
from django.dispatch import Signal
__all__ = ['cleanup_pre_delete', 'cleanup_post_delete']
cleanup_pre_delete = Signal()
'''Called just before a file is deleted. Passes a `file` keyword argument.'''
cleanup_post_delete = Signal()
'''Called just after a file is deleted. Passes a `file` keyword argument.'''
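# Example receiver (an illustrative sketch; the handler name is hypothetical):
#
#     from django.dispatch import receiver
#     from django_cleanup.signals import cleanup_post_delete
#
#     @receiver(cleanup_post_delete)
#     def log_cleanup(sender, file, **kwargs):
#         print('removed', file.name)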
|
py | b40fae8baf1e28c54dbe92fd9c45ae22b17f2ebd | import os
import subprocess
import sys
VERSION = "5.0.0.dev"
PATHOD = "pathod " + VERSION
MITMPROXY = "mitmproxy " + VERSION
# Serialization format version. This is displayed nowhere, it just needs to be incremented by one
# for each change in the file format.
FLOW_FORMAT_VERSION = 7
def get_dev_version() -> str:
"""
Return a detailed version string, sourced either from VERSION or obtained dynamically using git.
"""
mitmproxy_version = VERSION
here = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
try:
git_describe = subprocess.check_output(
['git', 'describe', '--long'],
stderr=subprocess.STDOUT,
cwd=here,
)
last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2)
commit = commit.lstrip("g")[:7]
tag_dist = int(tag_dist)
except Exception:
pass
else:
# Add commit info for non-tagged releases
if tag_dist > 0:
mitmproxy_version += f" (+{tag_dist}, commit {commit})"
# PyInstaller build indicator, if using precompiled binary
if getattr(sys, 'frozen', False):
mitmproxy_version += " binary"
return mitmproxy_version
if __name__ == "__main__": # pragma: no cover
print(VERSION)
|
py | b40fae8d239da44144a2f80018bf57ea7ed7a96e | """
Smartglass protocol core
**NOTE**: Should not be used directly, use :class:`.Console` !
"""
import uuid
import json
import base64
import logging
import asyncio
import socket
from typing import List, Optional, Tuple, Dict, Union
from xbox.sg import factory, packer, crypto, console
from xbox.sg.packet.message import message_structs
from xbox.sg.enum import PacketType, ConnectionResult, DisconnectReason,\
ServiceChannel, MessageType, AckStatus, SGResultCode, ActiveTitleLocation,\
PairedIdentityState, PublicKeyType
from xbox.sg.constants import WindowsClientInfo, AndroidClientInfo,\
MessageTarget
from xbox.sg.manager import MediaManager, InputManager, TextManager
from xbox.sg.utils.events import Event
from xbox.sg.utils.struct import XStruct
LOGGER = logging.getLogger(__name__)
PORT = 5050
BROADCAST = '255.255.255.255'
MULTICAST = '239.255.255.250'
CHANNEL_MAP = {
ServiceChannel.SystemInput: MessageTarget.SystemInputUUID,
ServiceChannel.SystemInputTVRemote: MessageTarget.SystemInputTVRemoteUUID,
ServiceChannel.SystemMedia: MessageTarget.SystemMediaUUID,
ServiceChannel.SystemText: MessageTarget.SystemTextUUID,
ServiceChannel.SystemBroadcast: MessageTarget.SystemBroadcastUUID
}
class ProtocolError(Exception):
"""
Exception thrown by CoreProtocol
"""
pass
class SmartglassProtocol(asyncio.DatagramProtocol):
HEARTBEAT_INTERVAL = 3.0
def __init__(
self,
address: Optional[str] = None,
crypto_instance: Optional[crypto.Crypto] = None
):
"""
Instantiate Smartglass Protocol handler.
Args:
address: Address
crypto_instance: Crypto instance
"""
self.address = address
self._transport: Optional[asyncio.DatagramTransport] = None
self.crypto = crypto_instance
self._discovered = {}
self.target_participant_id = None
self.source_participant_id = None
self._pending: Dict[str, asyncio.Future] = {}
self._chl_mgr = ChannelManager()
self._seq_mgr = SequenceManager()
self._frg_mgr = FragmentManager()
self.on_timeout = Event()
self.on_discover = Event()
self.on_message = Event()
self.on_json = Event()
self.started = False
async def stop(self) -> None:
"""
Dummy
"""
pass
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.started = True
self._transport = transport
def error_received(self, exc: OSError):
print('Error received:', exc.args)
def connection_lost(self, exc: Optional[Exception]):
print("Connection closed")
self._transport.close()
self.started = False
async def send_message(
self,
msg,
channel=ServiceChannel.Core,
addr: Optional[str] = None,
blocking: bool = True,
timeout: int = 5,
retries: int = 3
) -> Optional[XStruct]:
"""
Send message to console.
Packing and encryption happens here.
Args:
msg: Unassembled message to send
channel: Channel to send the message on,
Enum member of `ServiceChannel`
addr: IP address of target console
blocking: If set and `msg` is `Message`-packet, wait for ack
timeout: Seconds to wait for ack, only useful if `blocking`
is `True`
retries: Max retry count.
        Returns: Ack result if `blocking` is `True` and the message was ack'd,
            otherwise `None`
Raises:
ProtocolError: On failure
"""
if msg.header.pkt_type == PacketType.Message:
msg.header(
sequence_number=self._seq_mgr.next_sequence_num(),
target_participant_id=self.target_participant_id,
source_participant_id=self.source_participant_id,
channel_id=self._chl_mgr.get_channel_id(channel)
)
if self.crypto:
data = packer.pack(msg, self.crypto)
else:
data = packer.pack(msg)
if self.address:
addr = self.address
if not addr:
raise ProtocolError("No address specified in send_message")
elif not data:
raise ProtocolError("No data")
if msg.header.pkt_type == PacketType.Message \
and msg.header.flags.need_ack and blocking:
LOGGER.debug(
"Sending %s message on ServiceChannel %s to %s",
msg.header.flags.msg_type.name, channel.name, addr,
extra={'_msg': msg}
)
seqn = msg.header.sequence_number
tries = 0
result = None
while tries < retries and not result:
if tries > 0:
LOGGER.warning(
f"Message {msg.header.flags.msg_type.name} on "
f"ServiceChannel {channel.name} to {addr} not ack'd "
f"in time, attempt #{tries + 1}",
extra={'_msg': msg}
)
await self._send(data, (addr, PORT))
result = await self._await_ack('ack_%i' % seqn, timeout)
tries += 1
if result:
return result
raise ProtocolError("Exceeded retries")
elif msg.header.pkt_type == PacketType.ConnectRequest:
LOGGER.debug(
f"Sending ConnectRequest to {addr}", extra={'_msg': msg}
)
await self._send(data, (addr, PORT))
async def _send(self, data: bytes, target: Tuple[str, int]):
"""
Send data on the connected transport.
If addr is not provided, the target address that was used at the time
of instantiating the protocol is used.
(e.g. asyncio.create_datagram_endpoint in Console-class).
Args:
data: Data to send
target: Tuple of (ip_address, port)
"""
if self._transport:
self._transport.sendto(data, target)
else:
LOGGER.error('Transport not ready...')
def datagram_received(self, data: bytes, addr: str) -> None:
"""
Handle incoming smartglass packets
Args:
data: Raw packet
addr: IP address of sender
Returns: None
"""
try:
host, _ = addr
if self.crypto:
msg = packer.unpack(data, self.crypto)
else:
msg = packer.unpack(data)
if msg.header.pkt_type == PacketType.DiscoveryResponse:
LOGGER.debug(
f"Received DiscoverResponse from {host}",
extra={'_msg': msg}
)
self._discovered[host] = msg
self.on_discover(host, msg)
elif msg.header.pkt_type == PacketType.ConnectResponse:
LOGGER.debug(
f"Received ConnectResponse from {host}",
extra={'_msg': msg}
)
if 'connect' in self._pending:
self._set_result('connect', msg)
elif msg.header.pkt_type == PacketType.Message:
channel = self._chl_mgr.get_channel(msg.header.channel_id)
message_info = msg.header.flags.msg_type.name
if msg.header.flags.is_fragment:
message_info = 'MessageFragment ({0})'.format(message_info)
LOGGER.debug(
"Received %s message on ServiceChannel %s from %s",
message_info, channel.name, host, extra={'_msg': msg}
)
seq_num = msg.header.sequence_number
self._seq_mgr.add_received(seq_num)
if msg.header.flags.need_ack:
asyncio.create_task(
self.ack(
[msg.header.sequence_number],
[],
ServiceChannel.Core
)
)
self._seq_mgr.low_watermark = seq_num
if msg.header.flags.is_fragment:
sequence_begin = msg.protected_payload.sequence_begin
sequence_end = msg.protected_payload.sequence_end
fragment_payload = self._frg_mgr.reassemble_message(msg)
if not fragment_payload:
return
msg(protected_payload=fragment_payload)
LOGGER.debug("Assembled {0} (Seq {1}:{2})".format(
message_info, sequence_begin, sequence_end
), extra={'_msg': msg})
self._on_message(msg, channel)
else:
self._on_unk(msg)
except Exception:
LOGGER.exception("Exception in CoreProtocol datagram handler")
@staticmethod
def _on_unk(msg) -> None:
LOGGER.error(f'Unhandled message: {msg}')
async def _await_ack(
self,
identifier: str,
timeout: int = 5
) -> Optional[XStruct]:
"""
Wait for acknowledgement of message
Args:
identifier: Identifier of ack
timeout: Timeout in seconds
Returns:
:obj:`.Event`: Event
"""
fut = asyncio.Future()
self._pending[identifier] = fut
try:
await asyncio.wait_for(fut, timeout)
return fut.result()
except asyncio.TimeoutError:
return None
def _set_result(
self,
identifier: str,
result: Union[AckStatus, XStruct]
) -> None:
"""
Called when an acknowledgement comes in, unblocks `_await_ack`
Args:
identifier: Identifier of ack
result: Ack status
Returns: None
"""
self._pending[identifier].set_result(result)
del self._pending[identifier]
async def _heartbeat_task(self) -> None:
"""
Task checking for console activity, firing `on_timeout`-event on
timeout.
Heartbeats are empty "ack" messages that are to be ack'd by the console
Returns:
None
"""
while self.started:
try:
await self.ack([], [], ServiceChannel.Core, need_ack=True)
except ProtocolError:
self.on_timeout()
self.connection_lost(TimeoutError())
break
await asyncio.sleep(self.HEARTBEAT_INTERVAL)
def _on_message(self, msg: XStruct, channel: ServiceChannel) -> None:
"""
Handle msg of type `Message`.
Args:
msg: Message
channel: Channel the message was received on
Returns: None
"""
msg_type = msg.header.flags.msg_type
# First run our internal handlers
if msg_type == MessageType.Ack:
self._on_ack(msg)
elif msg_type == MessageType.StartChannelResponse:
self._chl_mgr.handle_channel_start_response(msg)
elif msg_type == MessageType.Json:
self._on_json(msg, channel)
# Then our hooked handlers
self.on_message(msg, channel)
def _on_ack(self, msg: XStruct) -> None:
"""
Process acknowledgement message.
Args:
msg: Message
Returns: None
"""
for num in msg.protected_payload.processed_list:
identifier = 'ack_%i' % num
self._seq_mgr.add_processed(num)
if identifier in self._pending:
self._set_result(identifier, AckStatus.Processed)
for num in msg.protected_payload.rejected_list:
identifier = 'ack_%i' % num
self._seq_mgr.add_rejected(num)
if identifier in self._pending:
self._set_result(identifier, AckStatus.Rejected)
def _on_json(self, msg: XStruct, channel: ServiceChannel) -> None:
"""
Process json message.
Args:
msg: Message
channel: Channel the message was received on
Returns: None
"""
text = msg.protected_payload.text
if 'fragment_data' in text:
text = self._frg_mgr.reassemble_json(text)
if not text:
# Input message is a fragment, but cannot assemble full msg yet
return
self.on_json(text, channel)
async def discover(
self,
addr: str = None,
tries: int = 5,
blocking: bool = True,
timeout: int = 5
) -> Dict[str, XStruct]:
"""
Discover consoles on the network
Args:
addr (str): IP address
tries (int): Discover attempts
blocking (bool): Wait a given time for responses, otherwise
return immediately
timeout (int): Timeout in seconds (only if `blocking` is `True`)
Returns:
list: List of discovered consoles
"""
self._discovered = {}
msg = factory.discovery()
task = asyncio.create_task(self._discover(msg, addr, tries))
# Blocking for a discovery is different than connect or regular message
if blocking:
try:
await asyncio.wait_for(task, timeout)
except asyncio.TimeoutError:
pass
return self.discovered
async def _discover(
self,
msg,
addr: str,
tries: int
) -> None:
for _ in range(tries):
await self.send_message(msg, addr=BROADCAST)
await self.send_message(msg, addr=MULTICAST)
if addr:
await self.send_message(msg, addr=addr)
await asyncio.sleep(0.5)
@property
def discovered(self) -> Dict[str, XStruct]:
"""
Return discovered consoles
Returns:
Discovered consoles
"""
return self._discovered
async def connect(
self,
userhash: str,
xsts_token: str,
client_uuid: uuid.UUID = uuid.uuid4(),
request_num: int = 0,
retries: int = 3
) -> PairedIdentityState:
"""
Connect to console
Args:
userhash: Userhash from Xbox Live Authentication
xsts_token: XSTS Token from Xbox Live Authentication
client_uuid: Client UUID (default: Generate random uuid)
request_num: Request number
retries: Max. connect attempts
Returns: Pairing State
Raises:
ProtocolError: If connection fails
"""
if not self.crypto:
raise ProtocolError("No crypto")
if isinstance(userhash, type(None)):
userhash = ''
if isinstance(xsts_token, type(None)):
xsts_token = ''
iv = self.crypto.generate_iv()
pubkey_type = self.crypto.pubkey_type
pubkey = self.crypto.pubkey_bytes
msg = factory.connect(
client_uuid, pubkey_type, pubkey, iv, userhash, xsts_token,
request_num, request_num, request_num + 1
)
payload_len = packer.payload_length(msg)
if payload_len < 1024:
messages = [msg]
else:
messages = _fragment_connect_request(
self.crypto, client_uuid, pubkey_type, pubkey,
userhash, xsts_token, request_num
)
tries = 0
result = None
while tries < retries and not result:
for m in messages:
await self.send_message(m)
            result = await self._await_ack('connect')
            tries += 1
if not result:
raise ProtocolError("Exceeded connect retries")
connect_result = result.protected_payload.connect_result
if connect_result != ConnectionResult.Success:
raise ProtocolError(
"Connecting failed! Result: %s" % connect_result
)
self.target_participant_id = 0
self.source_participant_id = result.protected_payload.participant_id
await self.local_join()
for channel, target_uuid in CHANNEL_MAP.items():
await self.start_channel(channel, target_uuid)
asyncio.create_task(self._heartbeat_task())
return result.protected_payload.pairing_state
async def local_join(
self,
client_info: Union[WindowsClientInfo, AndroidClientInfo] = WindowsClientInfo,
**kwargs
) -> None:
"""
Pair client with console.
Args:
client_info: Either `WindowsClientInfo` or `AndroidClientInfo`
**kwargs:
Returns: None
"""
msg = factory.local_join(client_info)
await self.send_message(msg, **kwargs)
async def start_channel(
self,
channel: ServiceChannel,
messagetarget_uuid: uuid.UUID,
title_id: int = 0,
activity_id: int = 0,
**kwargs
) -> None:
"""
Request opening of specific ServiceChannel
Args:
channel: Channel to start
messagetarget_uuid: Message Target UUID
title_id: Title ID, Only used for ServiceChannel.Title
activity_id: Activity ID, unknown use-case
**kwargs: KwArgs
Returns: None
"""
request_id = self._chl_mgr.get_next_request_id(channel)
msg = factory.start_channel(
request_id, title_id, messagetarget_uuid, activity_id
)
await self.send_message(msg, **kwargs)
async def ack(
self,
processed: List[int],
rejected: List[int],
channel: ServiceChannel,
need_ack: bool = False
) -> None:
"""
Acknowledge received messages that have `need_ack` flag set.
Args:
processed: Processed sequence numbers
rejected: Rejected sequence numbers
channel: Channel to send the ack on
need_ack: Whether we want this ack to be acknowledged by the target
participant.
Will be blocking if set.
Required for heartbeat messages.
Returns: None
"""
low_watermark = self._seq_mgr.low_watermark
msg = factory.acknowledge(
low_watermark, processed, rejected, need_ack=need_ack
)
await self.send_message(msg, channel=channel, blocking=need_ack)
async def json(
self,
data: str,
channel: ServiceChannel
) -> None:
"""
Send json message
Args:
data: JSON dict
channel: Channel to send the message to
Returns: None
"""
msg = factory.json(data)
await self.send_message(msg, channel=channel)
async def power_on(
self,
liveid: str,
addr: Optional[str] = None,
tries: int = 2
) -> None:
"""
Power on console.
Args:
liveid: Live ID of console
addr: IP address of console
tries: PowerOn attempts
Returns: None
"""
msg = factory.power_on(liveid)
for i in range(tries):
await self.send_message(msg, addr=BROADCAST)
await self.send_message(msg, addr=MULTICAST)
if addr:
await self.send_message(msg, addr=addr)
await asyncio.sleep(0.1)
async def power_off(
self,
liveid: str
) -> None:
"""
Power off console
Args:
liveid: Live ID of console
Returns: None
"""
msg = factory.power_off(liveid)
await self.send_message(msg)
async def disconnect(
self,
reason: DisconnectReason = DisconnectReason.Unspecified,
error: int = 0
) -> None:
"""
Disconnect console session
Args:
reason: Disconnect reason
error: Error Code
Returns: None
"""
msg = factory.disconnect(reason, error)
await self.send_message(msg)
async def game_dvr_record(
self,
start_delta: int,
end_delta: int
) -> AckStatus:
"""
Start Game DVR recording
Args:
start_delta: Start time
end_delta: End time
Returns: Acknowledgement status
"""
msg = factory.game_dvr_record(start_delta, end_delta)
return await self.send_message(msg)
async def launch_title(
self,
uri: str,
location: ActiveTitleLocation = ActiveTitleLocation.Full
) -> AckStatus:
"""
Launch title via URI
Args:
uri: Uri string
location: Location
Returns: Ack status
"""
msg = factory.title_launch(location, uri)
return await self.send_message(msg)
class SequenceManager:
def __init__(self):
"""
Process received messages by sequence numbers.
Also add processed / rejected messages to a list.
Tracks the `Low Watermark` that's sent with
`Acknowledgement`-Messages too.
"""
self.processed = []
self.rejected = []
self.received = []
self._low_watermark = 0
self._sequence_num = 0
def add_received(self, sequence_num: int) -> None:
"""
Add received sequence number
Args:
sequence_num: Sequence number
Returns: None
"""
if sequence_num not in self.received:
self.received.append(sequence_num)
def add_processed(self, sequence_num: int) -> None:
"""
Add sequence number of message that was sent to console
and succeeded in processing.
Args:
sequence_num: Sequence number
Returns: None
"""
if sequence_num not in self.processed:
self.processed.append(sequence_num)
def add_rejected(self, sequence_num: int) -> None:
"""
Add sequence number of message that was sent to console
and was rejected by it.
Args:
sequence_num: Sequence number
Returns: None
"""
if sequence_num not in self.rejected:
self.rejected.append(sequence_num)
def next_sequence_num(self) -> int:
"""
Get next sequence number to use for outbound `Message`.
        Returns: Next sequence number
"""
self._sequence_num += 1
return self._sequence_num
@property
def low_watermark(self) -> int:
"""
Get current `Low Watermark`
Returns: Low Watermark
"""
return self._low_watermark
@low_watermark.setter
def low_watermark(self, value: int) -> None:
"""
Set `Low Watermark`
Args:
value: Last received sequence number from console
Returns: None
"""
if value > self._low_watermark:
self._low_watermark = value
class ChannelError(Exception):
"""
Exception thrown by :class:`ChannelManager`.
"""
pass
class ChannelManager:
CHANNEL_CORE = 0
CHANNEL_ACK = 0x1000000000000000
def __init__(self):
"""
Keep track of established ServiceChannels
"""
self._channel_mapping = {}
self._requests = {}
self._request_id = 0
def handle_channel_start_response(self, msg: XStruct) -> ServiceChannel:
"""
Handle message of type `StartChannelResponse`
Args:
msg: Start Channel Response message
Raises:
:class:`ChannelError`: If channel acquire failed
Returns: Acquired ServiceChannel
"""
# Find ServiceChannel by RequestId
request_id = msg.protected_payload.channel_request_id
channel = self._requests.get(request_id)
if not channel:
raise ChannelError("Request Id %d not found. Was the channel request saved?" % request_id)
if msg.protected_payload.result != SGResultCode.SG_E_SUCCESS:
raise ChannelError("Acquiring ServiceChannel %s failed" % channel.name)
# Save Channel Id for appropriate ServiceChannel
channel_id = msg.protected_payload.target_channel_id
self._channel_mapping[channel] = channel_id
self._requests.pop(request_id)
LOGGER.debug("Acquired ServiceChannel %s -> Channel: 0x%x", channel.name, channel_id)
return channel
def get_next_request_id(self, channel: ServiceChannel) -> int:
"""
Get next Channel request id for ServiceChannel
Incremented on each call.
Args:
channel: Service channel
Returns: Channel request id
"""
# Clear old request for same ServiceChannel
self._requests = {key: val for key, val in self._requests.items()
if val != channel}
self._request_id += 1
self._requests[self._request_id] = channel
return self._request_id
def get_channel(self, channel_id: int) -> ServiceChannel:
"""
Get matching ServiceChannel enum for provided Channel ID of `Message`
Args:
channel_id: Channel of Message
Returns: Service channel
"""
# Core and Ack are fixed, don't need mapping
if channel_id == self.CHANNEL_CORE:
return ServiceChannel.Core
elif channel_id == self.CHANNEL_ACK:
return ServiceChannel.Ack
for key, value in self._channel_mapping.items():
if value == channel_id:
return key
raise ChannelError("ServiceChannel not found for channel_id: 0x%x"
% channel_id)
def get_channel_id(self, channel: ServiceChannel) -> int:
"""
Get Channel ID for use in `Message` for provided ServiceChannel
Args:
channel: Service channel
Returns: Channel ID for use in `Message`
"""
# Core and Ack are fixed, don't need mapping
if channel == ServiceChannel.Core:
return self.CHANNEL_CORE
elif channel == ServiceChannel.Ack:
return self.CHANNEL_ACK
if channel not in self._channel_mapping:
raise ChannelError(
f"Channel ID not found for ServiceChannel: {channel}"
)
return self._channel_mapping[channel]
def reset(self) -> None:
"""
Erase the channels table
Returns:
None
"""
self._requests = {}
self._channel_mapping = {}
self._request_id = 0
class FragmentError(Exception):
"""
Exception thrown by :class:`FragmentManager`.
"""
pass
class FragmentManager:
"""
Assembles fragmented messages
"""
def __init__(self):
self.msg_queue = {}
self.json_queue = {}
def reassemble_message(self, msg: XStruct) -> Optional[XStruct]:
"""
Reassemble message fragment
Args:
msg: Message fragment
Returns: Reassembled / decoded payload on success,
`None` if payload is not ready or assembly failed.
"""
msg_type = msg.header.flags.msg_type
payload = msg.protected_payload
current_sequence = msg.header.sequence_number
sequence_begin = payload.sequence_begin
sequence_end = payload.sequence_end
self.msg_queue[current_sequence] = payload.data
wanted_sequences = list(range(sequence_begin, sequence_end))
assembled = b''
for s in wanted_sequences:
data = self.msg_queue.get(s)
if not data:
return
assembled += data
[self.msg_queue.pop(s) for s in wanted_sequences]
# Parse raw data with original message struct
struct = message_structs.get(msg_type)
if not struct:
raise FragmentError(
f'Failed to find message struct for fragmented {msg_type}'
)
return struct.parse(assembled)
def reassemble_json(self, json_msg: dict) -> Optional[dict]:
"""
Reassemble fragmented json message
Args:
json_msg: Fragmented json message
Returns: Reassembled / Decoded json object on success,
`None` if datagram is not ready or assembly failed
"""
datagram_id, datagram_size =\
int(json_msg['datagram_id']), int(json_msg['datagram_size'])
fragment_offset = int(json_msg['fragment_offset'])
fragments = self.json_queue.get(datagram_id)
if not fragments:
# Just add initial fragment
self.json_queue[datagram_id] = [json_msg]
return None
# It's a follow-up fragment
# Check if we already received this datagram
for entry in fragments:
if fragment_offset == int(entry['fragment_offset']):
return
# Append current fragment to datagram list
fragments.append(json_msg)
# Check if fragment can be assembled
# If so, assemble and pop the fragments from queue
if sum(int(f['fragment_length']) for f in fragments) == datagram_size:
sorted_fragments = sorted(
fragments, key=lambda f: int(f['fragment_offset'])
)
output = ''.join(f['fragment_data'] for f in sorted_fragments)
self.json_queue.pop(datagram_id)
return self._decode(output)
return None
@staticmethod
def _encode(obj: dict) -> str:
"""
Dump a dict as json string, then encode with base64
Args:
obj: Dict to encode
Returns: base64 encoded string
"""
bytestr = json.dumps(obj, separators=(',', ':'), sort_keys=True)\
.encode('utf-8')
return base64.b64encode(bytestr).decode('utf-8')
@staticmethod
def _decode(data: str) -> dict:
"""
Decode a base64 encoded json object
Args:
data: Base64 string
Returns: Decoded json object
"""
return json.loads(base64.b64decode(data).decode('utf-8'))
def _fragment_connect_request(
crypto_instance: crypto.Crypto,
client_uuid: uuid.UUID,
pubkey_type: PublicKeyType,
pubkey: bytes,
userhash: str,
auth_token: str,
request_num: int = 0
) -> List:
"""
Internal method to fragment ConnectRequest.
Args:
crypto_instance: Instance of :class:`Crypto`
client_uuid: Client UUID
        pubkey_type: Public Key Type
pubkey: Public Key
userhash: Xbox Live Account userhash
auth_token: Xbox Live Account authentication token (XSTS)
request_num: Request Number
Returns:
list: List of ConnectRequest fragments
"""
messages = []
# Calculate packet length (without authentication data)
dummy_msg = factory.connect(
client_uuid, pubkey_type, pubkey, b'\x00' * 16, u'', u'', 0, 0, 0
)
dummy_payload_len = packer.payload_length(dummy_msg)
# Do fragmenting
total_auth_len = len(userhash + auth_token)
max_size = 1024 - dummy_payload_len
fragments = total_auth_len // max_size
overlap = total_auth_len % max_size
if overlap > 0:
fragments += 1
group_start = request_num
group_end = group_start + fragments
if fragments <= 1:
raise FragmentError('Authentication data too small to fragment')
auth_position = 0
for fragment_num in range(fragments):
available = max_size
current_hash = u''
if fragment_num == 0:
current_hash = userhash
available -= len(current_hash)
current_auth = auth_token[auth_position: auth_position + available]
auth_position += len(current_auth)
iv = crypto_instance.generate_iv()
messages.append(
factory.connect(
client_uuid, pubkey_type, pubkey, iv,
current_hash, current_auth, request_num + fragment_num,
group_start, group_end)
)
return messages
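# Illustrative, self-contained sketch (not part of the original module): it only
# exercises the FragmentManager base64/JSON helpers defined above; the payload
# dict is made up and no console or network I/O is involved.
if __name__ == '__main__':  # pragma: no cover - documentation only
    example_payload = {'request': 'GetConfiguration', 'msgid': 'example-1'}
    encoded = FragmentManager._encode(example_payload)
    decoded = FragmentManager._decode(encoded)
    # Round-tripping through base64-wrapped JSON preserves the payload.
    assert decoded == example_payload
    print('encoded fragment data:', encoded)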
|
py | b40faebff6e8a95fcf40a51d96aa7b94d01fdae3 | # flake8: noqa
import raven
from raven.exceptions import InvalidGitRepository
from .settings import *
# Import all constants to use throughout our application
# ==============================================================================
# CORE SETTINGS
# ==============================================================================
INSTALLED_APPS += [
'raven.contrib.django.raven_compat',
]
# ==============================================================================
# SECURITY SETTINGS
# ==============================================================================
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SECURE_HSTS_SECONDS = 60 * 60 * 24 * 7 * 52 # one year
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_SSL_REDIRECT = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
# ==============================================================================
# LOGGING SETTINGS
# ==============================================================================
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
},
'sentry': {
'level': 'INFO',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'': {
'handlers': ['console', 'sentry'],
'level': 'WARNING',
},
'colossus': {
'handlers': ['console', 'sentry'],
'level': 'INFO',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
}
}
# ==============================================================================
# THIRD-PARTY APPS SETTINGS
# ==============================================================================
RAVEN_CONFIG = {
'dsn': config('SENTRY_DSN', default='')
}
try:
RAVEN_CONFIG['release'] = raven.fetch_git_sha(BASE_DIR)
except InvalidGitRepository:
pass
|
py | b40fb02e083dcb22b80b5222ac0e5cfbb24965ae | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import random
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from collections import OrderedDict,defaultdict
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
net_glob1 = CNNMnist(args=args).to(args.device)
net_glob5 = CNNMnist(args=args).to(args.device)
net_glob7 = CNNMnist(args=args).to(args.device)
net_glob10 = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
net_glob1.train()
net_glob5.train()
net_glob7.train()
net_glob10.train()
# copy weights
w_glob = net_glob.state_dict()
w_glob1 = net_glob1.state_dict()
w_glob5 = net_glob5.state_dict()
w_glob7 = net_glob7.state_dict()
w_glob10 = net_glob10.state_dict()
# training - NO ATTACK
loss_train = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
#VIVEK constant attack experiment - 1 MALICIOUS
loss_train_1 = []
    fixed_agent_1 = random.randint(0,10) #random agent between 0 and 10 (inclusive) is fixed as malicious
updates_recorded_1 = False
fixed_agent_storage_1 = None
count_array_1 = []
    #VIVEK constant attack experiment - 5 MALICIOUS
loss_train_5 = []
fixed_agent_5 = random.sample(range(10),5)
updates_recorded_mapping_5 = defaultdict(bool)
for i in fixed_agent_5:
updates_recorded_mapping_5[i] = False
fixed_agent_storage_mapping_5 = {}
count_array_5 = []
    #VIVEK constant attack experiment - 7 MALICIOUS
loss_train_7 = []
fixed_agent_7 = random.sample(range(10),7)
updates_recorded_mapping_7 = defaultdict(bool)
for i in fixed_agent_7:
updates_recorded_mapping_7[i] = False
fixed_agent_storage_mapping_7 = {}
count_array_7 = []
    #VIVEK constant attack experiment - 10 MALICIOUS
loss_train_10 = []
fixed_agent_10 = random.sample(range(10),10)
updates_recorded_mapping_10 = defaultdict(bool)
for i in fixed_agent_10:
updates_recorded_mapping_10[i] = False
fixed_agent_storage_mapping_10 = {}
count_array_10 = []
for iter in range(args.epochs):
#agent_found_count = 0
w_locals, loss_locals = [], [] #w_locals = array of local_weights
w_locals_1, loss_locals_1 = [],[]
w_locals_5, loss_locals_5 = [],[]
w_locals_7, loss_locals_7 = [],[]
w_locals_10, loss_locals_10 = [],[]
m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this
idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM
for idx in idxs_users:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local1 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local7 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
w1, loss1 = local1.train(net=copy.deepcopy(net_glob1).to(args.device))
w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))
w7, loss7 = local7.train(net=copy.deepcopy(net_glob7).to(args.device))
w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))
print("***BLAH BLAH BLAH***")
if idx==fixed_agent_1:
if updates_recorded_1:
w1 = copy.deepcopy(fixed_agent_storage_1)
elif not updates_recorded_1:
fixed_agent_storage_1 = copy.deepcopy(w1)
updates_recorded_1 = True
if idx in fixed_agent_5:
if updates_recorded_mapping_5[idx]:
w5 = copy.deepcopy(fixed_agent_storage_mapping_5[idx])
elif not updates_recorded_mapping_5[idx]:
fixed_agent_storage_mapping_5[idx] = copy.deepcopy(w5)
updates_recorded_mapping_5[idx] = True
if idx in fixed_agent_7:
if updates_recorded_mapping_7[idx]:
w7 = copy.deepcopy(fixed_agent_storage_mapping_7[idx])
elif not updates_recorded_mapping_7[idx]:
fixed_agent_storage_mapping_7[idx] = copy.deepcopy(w7)
updates_recorded_mapping_7[idx] = True
if idx in fixed_agent_10:
if updates_recorded_mapping_10[idx]:
w10 = copy.deepcopy(fixed_agent_storage_mapping_10[idx])
elif not updates_recorded_mapping_10[idx]:
fixed_agent_storage_mapping_10[idx] = copy.deepcopy(w10)
updates_recorded_mapping_10[idx] = True
#NO ATTACK
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
#1 MALICIOUS
w_locals_1.append(copy.deepcopy(w1))
loss_locals_1.append(copy.deepcopy(loss1))
            #5 MALICIOUS
w_locals_5.append(copy.deepcopy(w5))
loss_locals_5.append(copy.deepcopy(loss5))
            #7 MALICIOUS
w_locals_7.append(copy.deepcopy(w7))
loss_locals_7.append(copy.deepcopy(loss7))
            #10 MALICIOUS
w_locals_10.append(copy.deepcopy(w10))
loss_locals_10.append(copy.deepcopy(loss10))
# update global weights
w_glob = FedAvg(w_locals)
w_glob1 = FedAvg(w_locals_1)
w_glob5 = FedAvg(w_locals_5)
w_glob7 = FedAvg(w_locals_7)
w_glob10 = FedAvg(w_locals_10)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
net_glob1.load_state_dict(w_glob1)
net_glob5.load_state_dict(w_glob5)
net_glob7.load_state_dict(w_glob7)
net_glob10.load_state_dict(w_glob10)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
loss_avg_1 = sum(loss_locals_1) / len(loss_locals_1)
loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)
loss_avg_7 = sum(loss_locals_7) / len(loss_locals_7)
loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)
print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
print('C1 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_1))
print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))
print('C7 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_7))
print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))
#count_array.append(agent_found_count)
loss_train.append(loss_avg)
loss_train_1.append(loss_avg_1)
loss_train_5.append(loss_avg_5)
loss_train_7.append(loss_avg_7)
loss_train_10.append(loss_avg_10)
# plot loss curve
plt.figure()
attack_no = plt.plot(range(len(loss_train)), loss_train, label="0 malicious")
attack_no = plt.plot(range(len(loss_train_1)), loss_train_1, label="1 malicious")
attack_no = plt.plot(range(len(loss_train_5)), loss_train_5, label="5 malicious")
attack_no = plt.plot(range(len(loss_train_7)), loss_train_7, label="7 malicious")
attack_no = plt.plot(range(len(loss_train_10)), loss_train_10, label="10 malicious")
plt.xlabel('epochs')
plt.ylabel('train_loss')
plt.savefig('log/fed_{}_{}_{}_C{}_iid{}_train_loss.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
plt.close()
#print("COUNT DATA",str(count_array))
print("NO ATTACK DATA=",loss_train)
print("1 ATTACK DATA=",loss_train_1)
print("5 ATTACK DATA=",loss_train_5)
print("7 ATTACK DATA=",loss_train_7)
print("10 ATTACK DATA=",loss_train_10)
# testing
net_glob.eval()
#print("Agent_Found_Count",agent_found_count)
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
net_glob1.eval()
acc_train1, loss_train_1 = test_img(net_glob1, dataset_train, args)
acc_test1, loss_test_1 = test_img(net_glob1, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_train1))
print("Testing accuracy (CONSTANT ATTACK 1): {:.2f}".format(acc_test1))
net_glob5.eval()
acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))
net_glob7.eval()
acc_train7, loss_train_7 = test_img(net_glob7, dataset_train, args)
acc_test7, loss_test_7 = test_img(net_glob7, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 7): {:.2f}".format(acc_train7))
print("Testing accuracy (CONSTANT ATTACK 7): {:.2f}".format(acc_test7))
net_glob10.eval()
acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)
acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)
print("Training accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_train10))
print("Testing accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_test10))
plt.figure()
objects = ("no_mal","1_mal","5_mal","7_mal","10_mal")
y_pos = np.arange(len(objects))
performance = [acc_test,acc_test1,acc_test5,acc_test7,acc_test10]
plt.bar(y_pos,performance,align='center',alpha=0.5)
plt.xticks(y_pos,objects)
plt.ylabel("Test Accuracy")
plt.title("Test Accuracy Analysis")
plt.savefig('log/fed_{}_{}_{}_C{}_iid{}_test_acc.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
    plt.close()
|
py | b40fb04fc02e794f30b8848f807dc1251ef9b546 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
import sys
from setuptools import find_packages, setup
from setuptools.extension import Extension
import numpy as np
if sys.version_info.major != 3:
sys.exit("scikit-bio can only be used with Python 3. You are currently "
"running Python %d." % sys.version_info.major)
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
if sys.platform == 'win32':
ssw_extra_compile_args = []
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.metadata._intersection",
["skbio/metadata/_intersection" + ext]),
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext],
include_dirs=[np.get_include()]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args,
include_dirs=[np.get_include()]),
Extension("skbio.diversity._phylogenetic",
["skbio/diversity/_phylogenetic" + ext],
include_dirs=[np.get_include()])
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='scikit-bio',
version=version,
license='BSD-3-Clause',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="[email protected]",
maintainer="scikit-bio development team",
maintainer_email="[email protected]",
url='http://scikit-bio.org',
packages=find_packages(),
ext_modules=extensions,
include_dirs=[np.get_include()],
tests_require=['pytest', 'coverage'],
install_requires=[
'lockfile >= 0.10.2', # req'd for our usage of CacheControl
'CacheControl >= 0.11.5',
'decorator >= 3.4.2',
'IPython >= 3.2.0',
'matplotlib >= 1.4.3',
'natsort >= 4.0.3',
'numpy >= 1.9.2',
'pandas >= 1.0.0',
'scipy >= 1.3.0',
'hdmedians >= 0.13',
'scikit-learn >= 0.19.1'
],
classifiers=classifiers,
package_data={
'skbio.diversity.alpha.tests': ['data/qiime-191-tt/*'],
'skbio.diversity.beta.tests': ['data/qiime-191-tt/*'],
'skbio.io.tests': ['data/*'],
'skbio.io.format.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
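# Illustrative build note (assumption, not from the original file): to regenerate
# the C sources from the Cython .pyx files instead of compiling the shipped .c
# files, export USE_CYTHON before building, e.g.
#   USE_CYTHON=1 python setup.py build_ext --inplace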
|
py | b40fb3301df97326eb330b5dce06dd572132e03f | from __future__ import absolute_import
from io import IOBase
from future.utils import PY3
from os.path import normpath, join
import sys
# TODO: DOCUMENT THESE:
def join_norm(*args):
return normpath(join(*args))
def restore_sys_path(func):
"""Decorator used to restore the sys.path
to the value it was before the function call.
This is useful for loading modules.
"""
def newfunc(*args, **kwargs):
oldpath = sys.path[:]
try:
return func(*args, **kwargs)
finally:
sys.path = oldpath
return newfunc
def is_file_handle(handle):
if PY3:
return isinstance(handle, IOBase)
else:
return isinstance(handle, file)
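# Illustrative usage sketch (not part of the original module): restore_sys_path
# guarantees that temporary sys.path edits made inside the decorated function do
# not leak out to the caller. The directory name below is arbitrary.
if __name__ == '__main__':  # pragma: no cover - documentation only
    @restore_sys_path
    def _load_with_extra_path(extra_dir):
        sys.path.insert(0, extra_dir)
        return sys.path[0]
    original_path = list(sys.path)
    assert _load_with_extra_path('/tmp/example-plugins') == '/tmp/example-plugins'
    # sys.path is restored once the decorated call returns.
    assert sys.path == original_path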
|
py | b40fb4cdcfc32605eff05f624d6a827a48bf3611 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as weight_init
import torch
__all__ = ['MultipleBasicBlock','MultipleBasicBlock_4', 'MultipleBasicBlock_8']
def conv3x3(in_planes, out_planes, dilation = 1, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=int(dilation*(3-1)/2), dilation=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, dilation = 1, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes,dilation, stride)
# self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
# self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# weight_init.xavier_normal()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
residual = x # 1 x 3 x H W
out = self.conv1(x)
# out = self.bn1(out)
out = self.relu(out) # 1 x 3 HW
out = self.conv2(out) # 1 x 3 HW
# out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class MultipleBasicBlock(nn.Module):
def __init__(self,input_feature,
block, num_blocks,
intermediate_feature = 64, dense = True):
super(MultipleBasicBlock, self).__init__()
self.dense = dense
self.num_block = num_blocks
self.intermediate_feature = intermediate_feature
self.block1= nn.Sequential(*[
nn.Conv2d(input_feature, intermediate_feature,
kernel_size=7, stride=1, padding=3, bias=True),
nn.ReLU(inplace=True)
])
# for i in range(1, num_blocks):
self.block2 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=2 else None
self.block3 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=3 else None
self.block4 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=4 else None
self.block4_5 = block(intermediate_feature, intermediate_feature, dilation=1) if num_blocks >= 5 else None
self.block4_6 = block(intermediate_feature, intermediate_feature, dilation=1) if num_blocks >= 6 else None
self.block4_7 = block(intermediate_feature, intermediate_feature, dilation=1) if num_blocks >= 7 else None
self.block4_8 = block(intermediate_feature, intermediate_feature, dilation=1) if num_blocks >= 8 else None
self.block5 = nn.Sequential(*[nn.Conv2d(intermediate_feature, 3 , (3, 3), 1, (1, 1))])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.block1(x)
x = self.block2(x) if self.num_block >= 2 else x
x = self.block3(x) if self.num_block >= 3 else x
# x = self.block4(x) if self.num_block== 4 else x
x = self.block4(x) if self.num_block >= 4 else x
x = self.block4_5(x) if self.num_block >= 5 else x
x = self.block4_6(x) if self.num_block >= 6 else x
x = self.block4_7(x) if self.num_block >= 7 else x
x = self.block4_8(x) if self.num_block == 8 else x
x = self.block5(x)
return x
def MultipleBasicBlock_4(input_feature,intermediate_feature = 64):
model = MultipleBasicBlock(input_feature,
BasicBlock,4 ,
intermediate_feature)
return model
def MultipleBasicBlock_8(input_feature,intermediate_feature = 64):
model = MultipleBasicBlock(input_feature,
BasicBlock,8 ,
intermediate_feature)
return model
if __name__ == '__main__':
# x= Variable(torch.randn(2,3,224,448))
# model = S2DF(BasicBlock,3,True)
# y = model(x)
model = MultipleBasicBlock(200, BasicBlock,4)
model = BasicBlock(64,64,1)
# y = model(x)
exit(0) |
py | b40fb59a22465ee12aaf5d59e4b6cad66351fdf7 | from __future__ import division, absolute_import, print_function
import os
from drawBot.misc import executeExternalProcess, getExternalToolPath
def generateMP4(imageTemplate, mp4path, frameRate, codec="libx264"):
ffmpegPath = getExternalToolPath(os.path.dirname(__file__), "ffmpeg")
assert ffmpegPath is not None
cmds = [
# ffmpeg path
ffmpegPath,
"-y", # overwrite existing files
"-loglevel", "16", # 'error, 16' Show all errors, including ones which can be recovered from.
"-r", str(frameRate), # frame rate
"-i", imageTemplate, # input sequence
"-c:v", codec, # codec
"-crf", "20", # Constant Rate Factor
"-pix_fmt", "yuv420p", # pixel format
mp4path, # output path
]
executeExternalProcess(cmds)
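# Illustrative call sketch (assumption, not part of the original module); the
# paths and frame rate below are made up. imageTemplate uses ffmpeg's
# printf-style sequence pattern, so "frames/frame_%04d.png" matches
# frame_0000.png, frame_0001.png, ... and requires the bundled ffmpeg binary
# resolved by getExternalToolPath above:
#   generateMP4('frames/frame_%04d.png', 'animation.mp4', frameRate=25)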
|
py | b40fb5f498452bbdf100d161f696d5ee5381951e | # Copyright [yyyy] [name of copyright owner]
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
class MaskL1Loss(nn.Module):
def __init__(self):
super(MaskL1Loss, self).__init__()
def forward(self, pred: torch.Tensor, gt, mask):
mask_sum = mask.sum()
if mask_sum.item() == 0:
return mask_sum, dict(l1_loss=mask_sum)
else:
loss = (torch.abs(pred[:, 0] - gt) * mask).sum() / mask_sum
return loss, dict(l1_loss=loss)
class BalanceL1Loss(nn.Module):
def __init__(self, negative_ratio=3.):
super(BalanceL1Loss, self).__init__()
self.negative_ratio = negative_ratio
def forward(self, pred: torch.Tensor, gt, mask):
'''
Args:
pred: (N, 1, H, W).
gt: (N, H, W).
mask: (N, H, W).
'''
loss = torch.abs(pred[:, 0] - gt)
positive = loss * mask
negative = loss * (1 - mask)
positive_count = int(mask.sum())
negative_count = min(
int((1 - mask).sum()),
int(positive_count * self.negative_ratio))
negative_loss, _ = torch.topk(negative.view(-1), negative_count)
negative_loss = negative_loss.sum() / negative_count
positive_loss = positive.sum() / positive_count
return positive_loss + negative_loss,\
dict(l1_loss=positive_loss, nge_l1_loss=negative_loss)
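# Illustrative smoke test (not part of the original module): tensor shapes follow
# the docstring above (pred is (N, 1, H, W); gt and mask are (N, H, W)); the
# random values are made up purely for demonstration.
if __name__ == '__main__':  # pragma: no cover - documentation only
    torch.manual_seed(0)
    pred = torch.rand(2, 1, 8, 8)
    gt = torch.rand(2, 8, 8)
    mask = (torch.rand(2, 8, 8) > 0.5).float()
    loss, metrics = BalanceL1Loss()(pred, gt, mask)
    print('balanced l1 loss:', float(loss), metrics)
    loss, metrics = MaskL1Loss()(pred, gt, mask)
    print('masked l1 loss:', float(loss))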
|
py | b40fb6e4f593e74a5ac07ac1f6c11de229b6fd35 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# models.py
# @Author : 吴鹰 (wuying)
# @Link :
# @Date : 7/26/2018, 3:40:05 PM
import jwt
from time import time
from datetime import datetime
from hashlib import md5
from flask import current_app
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from app import db, login
from app.search import add_to_index, remove_from_index, query_index
# add Followers association table
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
followed = db.relationship(
'User', secondary=followers,
primaryjoin=(followers.c.follower_id == id),
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'), lazy='dynamic',
)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, size)
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
def is_following(self, user):
return self.followed.filter(
followers.c.followed_id == user.id).count() > 0
def followed_posts(self):
followed = Post.query.join(
followers, (followers.c.followed_id == Post.user_id)).filter(
followers.c.follower_id == self.id)
own = Post.query.filter_by(user_id=self.id)
return followed.union(own).order_by(Post.timestamp.desc())
def get_reset_password_token(self, expires_in=1800):
return jwt.encode(
{'reset_password': self.id, 'exp': time() + expires_in},
current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')
@staticmethod
def verify_reset_password_token(token):
try:
id = jwt.decode(token, current_app.config['SECRET_KEY'],
algorithms=['HS256'])['reset_password']
except:
return
return User.query.get(id)
def __repr__(self):
return '<User {}>'.format(self.username)
class SearchableMixin(object):
@classmethod
def search(cls, expression, page, per_page):
ids, total = query_index(cls.__tablename__, expression, page, per_page)
if total == 0:
return cls.query.filter_by(id=0), 0
when = []
for i in range(len(ids)):
when.append((ids[i], i))
return cls.query.filter(cls.id.in_(ids)).order_by(
db.case(when, value=cls.id)), total
@classmethod
def before_commit(cls, session):
session._changes = {
'add': list(session.new),
'update': list(session.dirty),
'delete': list(session.deleted)
}
@classmethod
def after_commit(cls, session):
for obj in session._changes['add']:
if isinstance(obj, SearchableMixin):
add_to_index(obj.__tablename__, obj)
for obj in session._changes['update']:
if isinstance(obj, SearchableMixin):
add_to_index(obj.__tablename__, obj)
for obj in session._changes['delete']:
if isinstance(obj, SearchableMixin):
remove_from_index(obj.__tablename__, obj)
session._changes = None
@classmethod
def reindex(cls):
for obj in cls.query:
add_to_index(cls.__tablename__, obj)
class Post(SearchableMixin, db.Model):
__searchable__ = ['body']
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
language = db.Column(db.String(5))
def __repr__(self):
return '<Post {}>'.format(self.body)
db.event.listen(db.session, 'before_commit', SearchableMixin.before_commit)
db.event.listen(db.session, 'after_commit', SearchableMixin.after_commit)
# add a load_user function for LoginManager
@login.user_loader
def load_user(id):
return User.query.get(int(id))
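# Illustrative usage sketch (assumption, not part of the original module); these
# calls need an application context and the Elasticsearch-backed helpers in
# app.search, so they are shown as comments rather than executable code:
#   posts, total = Post.search('flask tutorial', page=1, per_page=10)
#   Post.reindex()  # rebuild the index after bulk-loading existing rows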
|
py | b40fb743bd00918839d34e242f2d2a89bff7b3eb | # Generated by Django 3.1.5 on 2021-02-03 00:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0047_auto_20210202_2223'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_ordered_date',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 3, 6, 22, 17, 646546)),
),
migrations.AlterField(
model_name='product',
name='product_domain',
field=models.CharField(default=False, max_length=100),
),
migrations.AlterField(
model_name='product',
name='product_ordered_date',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 3, 6, 22, 17, 646546)),
),
migrations.AlterField(
model_name='product',
name='product_selected',
field=models.TextField(default=False),
),
]
|
py | b40fb74aeaa129ba4f8967db2b7833fa92d724b2 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import cv2
import os
import numbers
import numpy as np
from skimage import color
import torch
import torchvision.utils as vutils
from torch.autograd import Variable
l_norm, ab_norm = 1., 1.
l_mean, ab_mean = 50., 0
def utf8_str(in_str):
try:
in_str = in_str.decode('UTF-8')
except Exception:
in_str = in_str.encode('UTF-8').decode('UTF-8')
return in_str
def load_gray_image(img_path):
img_path = utf8_str(img_path)
img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
return img_gray
def load_rgb_image(img_path):
img_path = utf8_str(img_path)
img_rgb = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
return img_rgb
def resize_img_longside(img_rgb, long_size, interpolation):
h, w = img_rgb.shape[:2]
scalar = long_size / max(h, w)
resized_img_rgb = cv2.resize(img_rgb, (int(w*scalar), int(h*scalar)), interpolation=interpolation)
return resized_img_rgb
def resize_img_shortside(img_rgb, short_size, interpolation):
h, w = img_rgb.shape[:2]
scalar = short_size / min(h, w)
resized_img_rgb = cv2.resize(img_rgb, (int(w*scalar), int(h*scalar)), interpolation=interpolation)
return resized_img_rgb
def resize_img(img_rgb, dsize, interpolation):
if isinstance(dsize, numbers.Number):
dsize = (int(dsize), int(dsize))
resized_img_rgb = cv2.resize(img_rgb, dsize, interpolation=interpolation)
return resized_img_rgb
def rgb2lab_transpose(img_rgb):
img_lab = color.rgb2lab(img_rgb).transpose((2, 0, 1))
return img_lab
def center_lab_img(img_lab):
img_lab_mc = img_lab / np.array((l_norm, ab_norm, ab_norm))[:, np.newaxis, np.newaxis] - np.array(
(l_mean / l_norm, ab_mean / ab_norm, ab_mean / ab_norm))[:, np.newaxis, np.newaxis]
return img_lab_mc
def center_l(l):
l_mc = (l - l_mean) / l_norm
return l_mc
def center_ab(ab):
ab_mc = (ab - ab_mean) / ab_norm
return ab_mc
def mult_mask(img_mask):
mask_mult = 110
return img_mask*mask_mult
def lab2rgb_transpose(img_l, img_ab):
''' INPUTS
img_l 1xXxX [0,100]
img_ab 2xXxX [-100,100]
OUTPUTS
returned value is XxXx3 '''
pred_lab = np.concatenate((img_l, img_ab), axis=0).transpose((1, 2, 0))
pred_rgb = (np.clip(color.lab2rgb(pred_lab), 0, 1)*255).astype('uint8')
return pred_rgb
def lab2rgb(img_l, img_ab):
''' INPUTS
img_l XxXx1 [0,100]
img_ab XxXx2 [-100,100]
OUTPUTS
returned value is XxXx3 '''
pred_lab = np.concatenate((img_l, img_ab), axis=2).astype('float64')
pred_rgb = color.lab2rgb(pred_lab)
pred_rgb = (np.clip(pred_rgb, 0, 1)*255).astype('uint8')
return pred_rgb
def batch_lab2rgb_transpose_mc(img_l_mc, img_ab_mc):
if isinstance(img_l_mc, Variable):
img_l_mc = img_l_mc.data.cpu()
if isinstance(img_ab_mc, Variable):
img_ab_mc = img_ab_mc.data.cpu()
if img_l_mc.is_cuda:
img_l_mc = img_l_mc.cpu()
if img_ab_mc.is_cuda:
img_ab_mc = img_ab_mc.cpu()
assert img_l_mc.dim()==4 and img_ab_mc.dim()==4, 'only for batch input'
img_l = img_l_mc*l_norm + l_mean
img_ab = img_ab_mc*ab_norm + ab_mean
pred_lab = torch.cat((img_l, img_ab), dim=1)
grid_lab = vutils.make_grid(pred_lab).numpy().astype('float64')
grid_rgb = (np.clip(color.lab2rgb(grid_lab.transpose((1, 2, 0))), 0, 1)*255).astype('uint8')
return grid_rgb
def lab2rgb_transpose_mc(img_l_mc, img_ab_mc):
if isinstance(img_l_mc, Variable):
img_l_mc = img_l_mc.data.cpu()
if isinstance(img_ab_mc, Variable):
img_ab_mc = img_ab_mc.data.cpu()
if img_l_mc.is_cuda:
img_l_mc = img_l_mc.cpu()
if img_ab_mc.is_cuda:
img_ab_mc = img_ab_mc.cpu()
    assert img_l_mc.dim()==3 and img_ab_mc.dim()==3, 'only for single image input'
img_l = img_l_mc*l_norm + l_mean
img_ab = img_ab_mc*ab_norm + ab_mean
pred_lab = torch.cat((img_l, img_ab), dim=0)
grid_lab = pred_lab.numpy().astype('float64')
grid_rgb = (np.clip(color.lab2rgb(grid_lab.transpose((1, 2, 0))), 0, 1)*255).astype('uint8')
return grid_rgb
def mkdir_if_not(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def to_np(x):
return x.data.cpu().numpy()
|
pyw | b40fb8516d187533bc680726065367b9e2bc08d6 | import shutil, glob, os, PySimpleGUI as sg
# Zip each folder.
def makeZip(dir):
root_dir = dir
root_dir = root_dir.strip('\"')
root_dir = root_dir + '/*'
for i in glob.glob(root_dir):
if not os.path.isdir(i):
continue
shutil.make_archive(i, format='zip', root_dir=i)
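# Illustrative example (hypothetical paths): makeZip('C:/photos') would leave
# C:/photos/albumA.zip, C:/photos/albumB.zip, ... -- one archive per immediate
# subdirectory, each containing that subdirectory's contents.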
if __name__ == '__main__':
sg.theme('') # Random theme
layout = [
[sg.Text('Dir:'), sg.Input(key='-DIR-')],
[sg.OK(), sg.Button('EXIT')]
]
window = sg.Window('make-zip', layout)
while True:
event, values = window.read()
# print(event)
if event is None or event == 'EXIT':
break
if event == 'OK':
root_dir = values["-DIR-"]
if root_dir == '':
sg.popup("Please enter a directory")
continue
makeZip(root_dir)
sg.popup('Done!')
window.close() |
py | b40fb89b5d3307e5869a4baefd9e398862f5ddad | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""GEMM Convolution schedule on ARM"""
import tvm
from tvm import te
from tvm.topi import nn
from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity
from ..util import get_const_tuple, get_const_int
from ..nn.util import get_pad_tuple
from .tensor_intrin import (
gemm_quantized,
gemm_quantized_impl,
gemm_acc_4x4_int8_int8_int32,
gemm_acc_nx16_int8_int8_int32,
)
from .arm_utils import is_aarch64_arm, is_dotprod_available
def configure_knobs(cfg, M, K):
""" Configure auto-tuning knobs for the interleaved strategy """
x, y = cfg.axis(M // 4), cfg.axis(K // 16)
cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]])
outer_loop, inner_loop = cfg.axis(4), cfg.axis(16)
cfg.define_annotate(
"A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec"
)
# Fallback configuration
if cfg.is_fallback:
cfg["reorder_gemm"] = ReorderEntity([0, 1])
cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"])
if not is_dotprod_available():
cfg.define_knob("gemm_quantized_unroll", [True, False])
cfg.define_knob("gemm_quantized_interleave", [True, False])
if cfg.is_fallback:
cfg["gemm_quantized_unroll"] = OtherOptionEntity(False)
cfg["gemm_quantized_interleave"] = OtherOptionEntity(True)
# Compute function
def compute_conv2d_gemm_without_weight_transform(
cfg,
data,
B_interleaved_t,
strides,
padding,
dilation,
out_dtype,
kernel_size,
output_channels,
interleave_A,
):
"""Compute conv2d by transforming the input,
executing GEMM and transforming the output back"""
batches, IH, IW, IC = get_const_tuple(data.shape)
KH, KW = get_const_tuple(kernel_size)
OC = get_const_int(output_channels)
kernel_area = KH * KW
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = get_const_tuple(dilation)
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
if pad_top or pad_left:
data_pad = nn.pad(
data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
)
else:
data_pad = data
# Im2col
M = OH * OW
K = IC * kernel_area
N = OC
A_shape = (batches, M, K)
if kernel_area == 1:
A = tvm.topi.reshape(data_pad, A_shape)
else:
A = te.compute(
A_shape,
lambda n, x, y: data_pad[
n,
HSTR * (x // OW) + dilation_h * ((y // IC) // KW),
WSTR * (x % OW) + dilation_w * ((y // IC) % KW),
y % IC,
],
name="data_im2col",
)
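        # Note (added comment): in this im2col layout, row x of A corresponds
        # to output pixel (x // OW, x % OW), and column y is kernel-position
        # major / channel minor, decoding to (kh, kw, c) =
        # ((y // IC) // KW, (y // IC) % KW, y % IC), which is exactly the
        # indexing used in the lambda above.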
# Pad if necessary
N_transformed = B_interleaved_t.shape[0]
tile_rows_B = B_interleaved_t.shape[2]
tile_cols_B = B_interleaved_t.shape[3]
# Select the tiling strategy for A.
# The tiling information is chosen to maximize register usage during
# the tile computation.
#
# Please refer to:
# - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product
# - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h
    # for more information.
    #
if is_dotprod_available() and interleave_A:
# If dot product has been enabled, and we are interleaving A
# tile size should be 8x4
tile_rows_A = 8
tile_cols_A = 4
else:
# If either there is no dot product or if we are using a native strategy
# tile size should be 4x16
tile_rows_A = 4
tile_cols_A = 16
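    # For illustration: with M = 100 and tile_rows_A = 8, M % tile_rows_A == 4,
    # so pad_M = 4 and M_padded = 104; K is rounded up to a multiple of
    # tile_cols_A in the same way below.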
pad_M = 0
pad_K = 0
if M % tile_rows_A != 0:
pad_M = tile_rows_A - (M % tile_rows_A)
if K % tile_cols_A != 0:
pad_K = tile_cols_A - (K % tile_cols_A)
M_padded = M + pad_M
K_padded = K + pad_K
N_padded = N_transformed * tile_rows_B
pad_before = (0, 0, 0)
pad_after = (0, pad_M, pad_K)
if pad_M != 0 or pad_K != 0:
A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded")
idxm = tvm.tir.indexmod
k = te.reduce_axis((0, K_padded), "k")
if interleave_A:
# Configuration space
configure_knobs(cfg, M_padded, K_padded)
# Pack the input data
A_interleaved = te.compute(
(batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A),
lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y],
name="A_interleaved",
)
# Execute GEMM
C_interleaved = te.compute(
(batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B),
lambda b, x, y, w, z: te.sum(
A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32")
* B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"),
axis=k,
),
name="C_interleaved",
)
# Unpack the result
C = te.compute(
(batches, M, N),
lambda b, x, y: C_interleaved[
b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A), idxm(y, tile_rows_B)
].astype(out_dtype),
name="C",
)
zero = tvm.tir.const(0)
else:
# No need to pack/unpack, execute GEMM directly
C = te.compute(
(batches, M_padded, N_padded),
lambda b, x, y: te.sum(
A[b, x, k].astype("int32")
* B_interleaved_t[
y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B)
].astype("int32"),
axis=k,
),
name="C",
)
# We need to ensure that infer bound pass does not remove the padding
# which is necessary for the tensorizations to work. So we need to
# add a dummy reference to the padding area of the result
zero = (
tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
- tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
)
# Reshape the result into a convolution output
out_shape = (batches, OH, OW, OC)
out = te.compute(
out_shape,
lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype),
name="conv2d_gemm_output",
)
return out
def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out):
""" Schedule the conv2d_gemm interleaved strategy """
C = out.op.input_tensors[0]
C_interleaved = C.op.input_tensors[0]
A_interleaved = C_interleaved.op.input_tensors[0]
# Input transform
A_interleaved_input = A_interleaved.op.input_tensors[0]
if A_interleaved_input.op.name == "A_padded":
s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3])
s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2])
s[A_interleaved_input].compute_inline()
data_im2col = A_interleaved_input.op.input_tensors[0]
else:
data_im2col = A_interleaved_input
b, m, n = data_im2col.op.axis
if data_im2col.op.name == "data_im2col":
n_outer, n_inner = s[data_im2col].split(n, 16)
s[data_im2col].unroll(n_outer)
s[data_im2col].vectorize(n_inner)
b_m_fused = s[data_im2col].fuse(b, m)
s[data_im2col].parallel(b_m_fused)
else:
s[data_im2col].compute_inline()
# Computation(through tensorize)
b, xo, yo, xi, yi = C_interleaved.op.axis
outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo])
b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm)
s[C_interleaved].parallel(b_outer_gemm_fused)
s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused)
_, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis
cfg["A_interleaved_unroll_vec"].apply(
s, A_interleaved, [outer_A_interleaved, inner_A_interleaved]
)
in_type = A_interleaved.dtype
out_type = C.dtype
k = C_interleaved.op.reduce_axis[0]
_, M, N = C.shape
if is_dotprod_available():
gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type)
xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile(
xi, yi, x_factor=8, y_factor=4
)
k_outer, k_inner = s[C_interleaved].split(k, 4)
xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4)
s[C_interleaved].reorder(
b_outer_gemm_fused,
inner_gemm,
xi_outer,
yi_outer,
k_outer,
xi_inner_outer,
xi_inner_inner,
yi_inner,
k_inner,
)
s[C_interleaved].tensorize(xi_inner_inner, gemm_acc)
s[C_interleaved].unroll(xi_inner_outer)
elif is_aarch64_arm():
s[C_interleaved].reorder(yi, xi)
K = A_interleaved_input.shape[2]
assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported"
unroll = cfg["gemm_quantized_unroll"].val
interleave = cfg["gemm_quantized_interleave"].val
gemm = gemm_quantized(M, N, K, unroll, interleave, in_type, out_type)
s[C_interleaved].pragma(
b_outer_gemm_fused,
"import_llvm",
gemm_quantized_impl(M, N, K, unroll, interleave, in_type),
)
s[C_interleaved].tensorize(yi, gemm)
# Output transform
if out != final_out:
n, h, w, c = out.op.axis
_, inner = s[out].split(c, 4)
s[C].compute_at(s[out], inner)
s[out].vectorize(inner)
return s
def schedule_conv2d_gemm_native(cfg, s, out, final_out):
""" Schedule the conv2d_gemm hybrid strategy """
C = out.op.input_tensors[0]
A = C.op.input_tensors[0]
in_type = A.dtype
# Computation
b, x, y = C.op.axis
(k,) = C.op.reduce_axis
k_outer, k_inner = s[C].split(k, 16)
x_outer, y_outer, x_inner, y_inner = s[C].tile(x, y, x_factor=4, y_factor=16)
s[C].reorder(b, x_outer, y_outer, k_outer, x_inner, y_inner, k_inner)
gemm_acc = gemm_acc_nx16_int8_int8_int32(in_type, rows=1)
s[C].unroll(x_inner)
s[C].tensorize(y_inner, gemm_acc)
s[C].parallel(x_outer)
# Input transform
if A.op.name == "A_padded":
padding_A = True
data_im2col = A.op.input_tensors[0]
else:
padding_A = False
data_im2col = A
b, m, n = data_im2col.op.axis
if data_im2col.op.name == "data_im2col":
n_outer, n_inner = s[data_im2col].split(n, 16)
s[data_im2col].unroll(n_outer)
s[data_im2col].vectorize(n_inner)
s[data_im2col].parallel(m)
elif padding_A:
s[data_im2col].compute_inline()
s[A].compute_at(s[C], x_inner)
else:
s[data_im2col].compute_at(s[C], x_inner)
# Output transform
if out != final_out:
n, h, w, c = out.op.axis
_, inner = s[out].split(c, 4)
s[out].vectorize(inner)
return s
|
py | b40fba45b2217afd5930c3c342b483d51574644b | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 16:55:39 2020
@author: melis
"""
import numpy as np
from matplotlib import pyplot as plt
import yaml
import calculating_env
# =============================================================================
if __name__ == '__main__':
file= open('use1.yaml', 'r')
use_dict = yaml.load(file, Loader=yaml.UnsafeLoader)
for key, value in use_dict.items():
print(key + " : " + str(value))
#
# =============================================================================
#need to make variables
water= use_dict.get('water',' ')
water_avg_use=use_dict.get('water_avg_use',' ')
water_std=use_dict.get('water_std',' ')
electricity= use_dict.get('electricity',' ')
elect_avg_use=use_dict.get('elect_avg_use',' ')
elect_std=use_dict.get('elect_std',' ')
diesel= use_dict.get('diesel',' ')
diesel_avg_use=use_dict.get('diesel_avg_use',' ')
diesel_std=use_dict.get('diesel_std',' ')
gas=use_dict.get('gas',' ')
gas_avg_use=use_dict.get('gas_avg_use',' ')
gas_std=use_dict.get('gas_std',' ')
al=use_dict.get('al',' ')
al_avg_use=use_dict.get('al_avg_use',' ')
al_std=use_dict.get('al_std',' ')
steel=use_dict.get('steel',' ')
steel_avg_use=use_dict.get('steel_avg_use',' ')
steel_std=use_dict.get('steel_std',' ')
HDPE=use_dict.get('HDPE',' ')
HDPE_avg_use=use_dict.get('HDPE_avg_use',' ')
HDPE_std=use_dict.get('HDPE_std',' ')
avg_env, min_env, max_env = calculating_env.calc_env(use_dict) |
py | b40fba6b0edaaa7f659a80719401f6b603ef1270 | from setuptools import setup, find_packages
requirements = ["websocket-client!=0.49"]
def readme():
with open('README.md') as f:
return f.read()
setup(
name="liquidtap",
version="1.0.1",
description="QuoineFinancial/LiquidTap websocket client for Python",
long_description=readme(),
long_description_content_type='text/markdown',
keywords="pusher websocket client liquid liquidapi liquidtap",
author="Jered Masters",
author_email="[email protected]",
license="MIT",
url="https://github.com/QuoineFinancial/liquid-tap-python",
install_requires=requirements,
packages=find_packages(exclude=['contrib', 'docs', 'websocket-client', 'txaio']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
|
py | b40fbb69e4779c6c84efa548b564e253bc2819a3 | import os
import sys
from Qt import QtWidgets, QtCore
import qtawesome
from avalon import io, api
from openpype import style
from openpype.tools.utils.delegates import VersionDelegate
from openpype.tools.utils.lib import (
qt_app_context,
preserve_expanded_rows,
preserve_selection,
FamilyConfigCache
)
from .model import (
InventoryModel,
FilterProxyModel
)
from .view import SceneInventoryView
module = sys.modules[__name__]
module.window = None
class SceneInventoryWindow(QtWidgets.QDialog):
"""Scene Inventory window"""
def __init__(self, parent=None):
super(SceneInventoryWindow, self).__init__(parent)
if not parent:
self.setWindowFlags(
self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint
)
project_name = os.getenv("AVALON_PROJECT") or "<Project not set>"
self.setWindowTitle("Scene Inventory 1.0 - {}".format(project_name))
self.setObjectName("SceneInventory")
# Maya only property
self.setProperty("saveWindowPref", True)
self.resize(1100, 480)
# region control
filter_label = QtWidgets.QLabel("Search", self)
text_filter = QtWidgets.QLineEdit(self)
outdated_only_checkbox = QtWidgets.QCheckBox(
"Filter to outdated", self
)
outdated_only_checkbox.setToolTip("Show outdated files only")
outdated_only_checkbox.setChecked(False)
icon = qtawesome.icon("fa.arrow-up", color="white")
update_all_button = QtWidgets.QPushButton(self)
update_all_button.setToolTip("Update all outdated to latest version")
update_all_button.setIcon(icon)
icon = qtawesome.icon("fa.refresh", color="white")
refresh_button = QtWidgets.QPushButton(self)
refresh_button.setToolTip("Refresh")
refresh_button.setIcon(icon)
control_layout = QtWidgets.QHBoxLayout()
control_layout.addWidget(filter_label)
control_layout.addWidget(text_filter)
control_layout.addWidget(outdated_only_checkbox)
control_layout.addWidget(update_all_button)
control_layout.addWidget(refresh_button)
# endregion control
family_config_cache = FamilyConfigCache(io)
model = InventoryModel(family_config_cache)
proxy = FilterProxyModel()
proxy.setSourceModel(model)
proxy.setDynamicSortFilter(True)
proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
view = SceneInventoryView(self)
view.setModel(proxy)
# set some nice default widths for the view
view.setColumnWidth(0, 250) # name
view.setColumnWidth(1, 55) # version
view.setColumnWidth(2, 55) # count
view.setColumnWidth(3, 150) # family
view.setColumnWidth(4, 100) # namespace
# apply delegates
version_delegate = VersionDelegate(io, self)
column = model.Columns.index("version")
view.setItemDelegateForColumn(column, version_delegate)
layout = QtWidgets.QVBoxLayout(self)
layout.addLayout(control_layout)
layout.addWidget(view)
# signals
text_filter.textChanged.connect(self._on_text_filter_change)
outdated_only_checkbox.stateChanged.connect(
self._on_outdated_state_change
)
view.hierarchy_view_changed.connect(
self._on_hierarchy_view_change
)
view.data_changed.connect(self.refresh)
refresh_button.clicked.connect(self.refresh)
update_all_button.clicked.connect(self._on_update_all)
self._update_all_button = update_all_button
self._outdated_only_checkbox = outdated_only_checkbox
self._view = view
self._model = model
self._proxy = proxy
self._version_delegate = version_delegate
self._family_config_cache = family_config_cache
self._first_show = True
family_config_cache.refresh()
def showEvent(self, event):
super(SceneInventoryWindow, self).showEvent(event)
if self._first_show:
self._first_show = False
self.setStyleSheet(style.load_stylesheet())
def keyPressEvent(self, event):
"""Custom keyPressEvent.
Override keyPressEvent to do nothing so that Maya's panels won't
take focus when pressing "SHIFT" whilst mouse is over viewport or
outliner. This way users don't accidentally perform Maya commands
whilst trying to name an instance.
"""
def refresh(self, items=None):
with preserve_expanded_rows(
tree_view=self._view,
role=self._model.UniqueRole
):
with preserve_selection(
tree_view=self._view,
role=self._model.UniqueRole,
current_index=False
):
kwargs = {"items": items}
# TODO do not touch view's inner attribute
if self._view._hierarchy_view:
kwargs["selected"] = self._view._selected
self._model.refresh(**kwargs)
def _on_hierarchy_view_change(self, enabled):
self._proxy.set_hierarchy_view(enabled)
self._model.set_hierarchy_view(enabled)
def _on_text_filter_change(self, text_filter):
self._proxy.setFilterRegExp(text_filter)
def _on_outdated_state_change(self):
self._proxy.set_filter_outdated(
self._outdated_only_checkbox.isChecked()
)
def _on_update_all(self):
self._view.update_all()
def show(root=None, debug=False, parent=None, items=None):
"""Display Scene Inventory GUI
Arguments:
debug (bool, optional): Run in debug-mode,
defaults to False
parent (QtCore.QObject, optional): When provided parent the interface
to this QObject.
        items (list of dict, optional): Items to inject for standalone
            testing.
"""
try:
module.window.close()
del module.window
except (RuntimeError, AttributeError):
pass
if debug is True:
io.install()
if not os.environ.get("AVALON_PROJECT"):
any_project = next(
project for project in io.projects()
if project.get("active", True) is not False
)
api.Session["AVALON_PROJECT"] = any_project["name"]
else:
api.Session["AVALON_PROJECT"] = os.environ.get("AVALON_PROJECT")
with qt_app_context():
window = SceneInventoryWindow(parent)
window.show()
window.refresh(items=items)
module.window = window
# Pull window to the front.
module.window.raise_()
module.window.activateWindow()
|
py | b40fbd65786a74cce2276d2580f49eaea42f7a4a | from __future__ import absolute_import
from django.forms.fields import CharField
from django.forms.widgets import PasswordInput
from django.utils.translation import ugettext_lazy as _
from fobi.base import FormFieldPlugin, get_theme
from . import UID
from .forms import PasswordInputForm
__title__ = 'fobi.contrib.plugins.form_elements.fields.password.base'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('PasswordInputPlugin',)
theme = get_theme(request=None, as_instance=True)
class PasswordInputPlugin(FormFieldPlugin):
"""Password field plugin."""
uid = UID
name = _("Password")
group = _("Fields")
form = PasswordInputForm
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
"""Get form field instances."""
widget_attrs = {
'class': theme.form_element_html_class,
'placeholder': self.data.placeholder,
}
field_kwargs = {
'label': self.data.label,
'help_text': self.data.help_text,
'initial': self.data.initial,
'required': self.data.required,
'widget': PasswordInput(attrs=widget_attrs),
}
if self.data.max_length:
field_kwargs['max_length'] = self.data.max_length
return [(self.data.name, CharField, field_kwargs)]
|
py | b40fbd80f7dd8232663c0123e611a886f587a525 | # Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tf_functions.py."""
from absl.testing import absltest
from absl.testing import parameterized
import funcsigs
from tf_coder import filter_group
from tf_coder import tf_coder_utils
from tf_coder import tf_functions
import torch
# FunctionInfo names should match this.
FUNCTION_INFO_NAME_REGEX = r'\w+(\.\w+)*\(\w+(, \w+(=[^,=()]+)?)*\)'
class TfFunctionsTest(parameterized.TestCase):
def _check_function(self, function_name, usable_args, constant_kwargs):
"""Checks for errors in one entry of tf_functions.PY_FUNCTIONS."""
try:
func_obj = tf_coder_utils.get_tf_function(function_name)
except ValueError:
func_obj = None
self.assertIsNotNone(func_obj,
'Could not find function {}.'.format(function_name))
self.assertLen(set(usable_args), len(usable_args),
'Function {} has duplicate usable arguments.'.format(
function_name))
parameters = funcsigs.signature(func_obj).parameters
for param_name in parameters:
param = parameters[param_name]
if param.default is param.empty:
self.assertIn(param_name, usable_args,
"Function {} is missing required argument '{}'.".format(
function_name, param_name))
ordered_param_names = list(parameters)
last_index = -1
for arg_name in usable_args:
self.assertIn(arg_name, ordered_param_names,
"Function {} has invalid argument '{}'.".format(
function_name, arg_name))
cur_index = ordered_param_names.index(arg_name)
self.assertGreater(cur_index, last_index,
"Function {} has argument '{}' out of order.".format(
function_name, arg_name))
last_index = cur_index
for kwarg_name in constant_kwargs:
self.assertIn(kwarg_name, ordered_param_names,
"Function {} has invalid kwarg '{}'.".format(
function_name, kwarg_name))
self.assertNotIn(kwarg_name, usable_args,
"Function {} has repeated argument '{}'.".format(
function_name, kwarg_name))
def test_check_function_passes(self):
self._check_function('tf.argsort', ['values', 'axis'],
{'direction': 'DESCENDING', 'stable': True})
@parameterized.named_parameters(
('bad_function', 'tf.this_function_does_not_exist', ['x', 'y'], {}),
('duplicate_args', 'tf.add', ['x', 'y', 'x'], {}),
('missing_arg', 'tf.add', ['x'], {}),
('invalid_arg', 'tf.add', ['x', 'y', 'z'], {}),
('out_of_order', 'tf.add', ['y', 'x'], {}),
('duplicate_kwarg', 'tf.argsort', ['values', 'axis'], {'axis': -1}),
('invalid_kwarg', 'tf.argsort', ['values', 'axis'], {'invalid': True}))
def test_check_function_fails(self, function_name, usable_args,
constant_kwargs):
with self.assertRaises(AssertionError):
self._check_function(function_name, usable_args, constant_kwargs)
@parameterized.named_parameters(
('not_tf', 'np.foo(axis)'),
('nested_modules', 'tf.nn.foo(tensor, axis)'),
('no_module', 'foo(tensor, axis)'),
('string_literal', 'foo(tensor, axis, baz="a constant string")'),
('boolean_literal', 'foo(tensor, axis, baz=False)'),
('two_literals', 'foo(tensor, bar=[], baz=1.0)'))
def test_function_info_name_regex_passes(self, good_name):
self.assertRegex(good_name, FUNCTION_INFO_NAME_REGEX)
@parameterized.named_parameters(
('bad_characters', 'tf.foo(axis=1)'),
('extra_spaces_1', 'tf.foo(tensor, axis)'),
('extra_spaces_2', 'tf.foo( tensor, axis)'),
('extra_spaces_3', 'tf.foo (tensor, axis)'),
('missing_space', 'tf.foo(tensor,axis)'),
('missing_middle_arg', 'tf.foo(tensor, , axis)'),
('missing_last_arg', 'tf.foo(tensor, )'),
('no_args', 'tf.foo()'),
('no_parens', 'tf.foo'),
('empty_literal', 'tf.foo(a, x=)'),
('literal_with_bad_char', 'tf.foo(a, x=",")'))
def test_function_info_name_regex_fails(self, bad_name):
self.assertNotRegex(bad_name, FUNCTION_INFO_NAME_REGEX)
@parameterized.named_parameters(
('tf_functions', tf_functions.PY_FUNCTIONS))#,
# ('sparse_functions', tf_functions.SPARSE_FUNCTIONS))
def test_function_lists(self, function_list):
for function_info in function_list:
self.assertRegex(function_info.name, FUNCTION_INFO_NAME_REGEX)
self.assertIsInstance(function_info.filter_group,
filter_group.FilterGroup)
self.assertIsInstance(function_info.weight, int)
function_name, usable_args, constant_kwargs = (
tf_functions.parse_function_info_name(function_info))
self._check_function(function_name, usable_args, constant_kwargs)
def test_parse_function_info_name(self):
function_info = tf_functions.FunctionInfo(
name='tf.foo.bar(tensor, axis, baz=True)',
filter_group=filter_group.FilterGroup.NONE,
weight=1)
self.assertEqual(tf_functions.parse_function_info_name(function_info),
('tf.foo.bar', ['tensor', 'axis'], {'baz': True}))
@parameterized.named_parameters(
('no_open_paren', 'tf.foo.bar tensor, axis)'),
('multiple_open_parens', 'tf.foo.bar((tensor, axis)'),
('no_close_paren', 'tf.foo.bar(tensor, axis'),
('close_paren_not_at_end', 'tf.foo.bar(tensor, axis) '))
def test_parse_function_info_name_fails_for_bad_name(self, bad_name):
function_info = tf_functions.FunctionInfo(
name=bad_name,
filter_group=filter_group.FilterGroup.NONE,
weight=1)
with self.assertRaises(ValueError):
tf_functions.parse_function_info_name(function_info)
if __name__ == '__main__':
absltest.main()
|
py | b40fbdd77f9c47b343a60af2ea7e37763e483a01 | import solve
import utils
def test_binary_digits_to_places_zero():
bin_string = "00000"
expected = []
places = utils.binary_digits_to_places(bin_string)
assert places == expected
def test_binary_digits_to_places_single_bit():
bin_string = "00001"
expected = [1]
places = utils.binary_digits_to_places(bin_string)
assert places == expected
def test_binary_digits_to_places_multiple_bits():
bin_string = "01011"
expected = [1, 2, 8]
places = utils.binary_digits_to_places(bin_string)
assert places == expected
def test_binary_digits_to_places_all_bits():
bin_string = "11111"
expected = [1, 2, 4, 8, 16]
places = utils.binary_digits_to_places(bin_string)
assert places == expected
def test_count_bits_by_place_empty():
places_list = []
expected = {}
counts = utils.count_bits_by_place(places_list)
assert counts == expected
def test_count_bits_by_place():
places_list = [1, 2, 4, 8, 2, 8, 4, 16]
expected = {
1: 1,
2: 2,
4: 2,
8: 2,
16: 1
}
counts = utils.count_bits_by_place(places_list)
assert counts == expected
def test_count_bits_by_same_place():
places_list = [2, 2, 2, 2, 2, 2, 2, 2]
expected = {
2: 8,
}
counts = utils.count_bits_by_place(places_list)
assert counts == expected
def test_bitwise_not_zero():
val = 0
expected = 1
result = utils.bitwise_not(val)
assert result == expected
def test_bitwise_not_all_ones():
val = 255
expected = 0
result = utils.bitwise_not(val)
assert result == expected
def test_bitwise_not_force_bit_size():
val = 7
expected = 248
result = utils.bitwise_not(val, bitsize=8)
assert result == expected
def test_part1_sample_input():
binary_strings = [
"00100",
"11110",
"10110",
"10111",
"10101",
"01111",
"00111",
"11100",
"10000",
"11001",
"00010",
"01010",
]
result = solve.part_1(binary_strings)
assert result == 198
def most_common_empty_iterable():
values = []
expected = None
result = utils.most_common(values)
assert result == expected
def most_common_single_value():
values = [
1, 0, 1, 1, 0, 1, 1, 0]
expected = [1]
result = utils.most_common(values)
assert result == expected
def test_most_common_bits():
bit_lists = [
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 0, 1, 1, 0],
[1, 0, 1, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[0, 0, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 1, 0, 1, 0],
]
expected = [1, 0, 1, 1, 0]
result = utils.most_common_bits(bit_lists)
assert result == expected
def test_filter_by_places_no_filter():
places = [1, 2, 4, 8]
places_lists = [
[1],
[2, 8],
[1, 4],
[8]
]
filter_places = []
expected = [1]
results = utils.filter_by_places(places_lists, places, filter_places)
assert results == expected
def test_filter_by_places_single_place_filter():
places = [1, 2, 4, 8]
places_lists = [
[1, 2],
[2, 8],
[1, 4],
[8]
]
filter_places = [8]
expected = [8]
results = utils.filter_by_places(places_lists, places, filter_places)
assert results == expected
def test_filter_by_places_multi_place_filter():
places = [1, 2, 4, 8]
places_lists = [
[1, 2],
[2, 8],
[1, 4],
[8]
]
filter_places = [2, 4]
expected = [1, 4]
results = utils.filter_by_places(places_lists, places, filter_places)
assert results == expected
def test_part2_sample_input():
binary_strings = [
"00100",
"11110",
"10110",
"10111",
"10101",
"01111",
"00111",
"11100",
"10000",
"11001",
"00010",
"01010",
]
result = solve.part_2(binary_strings)
assert result == 230
|
py | b40fbe0a014a566dd5241651c9d6c3c2db8437d6 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 20:31:09 2018
Python algorithm for the exercise presented in:
A Novel Numerical Approach to the MCLP Based Resilent Supply Chain Optimization
V. Azhmyakov, J.P. Fernández-Gutiérrez, S.K. Gadi, St. Pickl
5. Application to the optimal design of a resilent supply chain management
system
@author: ivand
"""
import numpy as np
# The exercise considers a basic MCLP model and optimizes a resilient supply
# chain for a set of manufacturing plants and warehouses.
# The resilience of this supply chain management system is modeled in this
# exercise through a matrix A whose components a_ij are of fuzzy type.
# This conceptual supply chain includes
l = 8 # manufacturing plants j
n = 5 # warehouses i
k = 2 # new warehouses under consideration
# The corresponding matrix is defined as follows:
A=np.array([[0.81286, 0.25123, 0.0, 0.54893, 1.0, 0.77105, 0.0, 0.64741],
[0.0, 0.58108, 0.0, 0.90309, 0.0, 0.27081, 0.51569, 0.91733],
[0.0, 0.32049, 0.64850, 0.74559, 0.0, 0.65833, 0.0, 0.60562],
[0.62968, 0.89444, 0.91921, 0.50869, 0.0, 0.60434, 0.0, 0.63874],
[0.0, 0.79300, 0.94740, 0.99279, 0.0, 0.23595, 0.57810, 0.71511]])
# The objective weights w_j indicate a priority
# and are assumed to be equal to:
w = np.array([32.0, 19.0, 41.0, 26.0, 37.0, 49.0, 50.0, 11.0])
# The fifth demand point in this example has no resilient character,
# since only one factory covers it.
# We assume the decision maker is interested in opening another facility.
# In other words,
# we compute from equation 12, where
# S_A_i := sum mu_j * a_i
# and
# A_i := (a_ij,..,a_in)'
# where a_ij is a component of the eligibility matrix
# Given mu as follows
mu = np.array([2,2,1,2,2,2,1,2])
# S is computed
S = np.inner(mu,A)
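# Note added for clarity: A has shape (n, l) = (5, 8) and mu has length l, so
# S has shape (n,) with S[i] = sum_j mu[j] * A[i, j], the weighted coverage
# score of warehouse i that drives the selection of the k facilities below.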
# Two functions are defined that help in the search for the optimal solution
def z(n):
'''
    This function computes Z_h based on the parameter n,
    which can be the number of warehouses or manufacturing plants.
    Returns an array Z_h of size [2**n-1, n] with boolean
    values {0,1}
'''
i = 2**n
Z_h = np.zeros((i-1,n))
p = 2**np.arange(n)
for j in range(0,i):
d = j * np.ones((1,n))
Z_h[j-1,] = np.floor((d%(2*p))/p)
return Z_h
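# Illustrative example: z(2) enumerates every non-empty 0/1 combination of two
# binary decision variables:
#     array([[1., 0.],
#            [0., 1.],
#            [1., 1.]])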
def sol(XMn, fOptimEn, Zn):
'''
    Function that returns the optimal solution to the equations.
    It uses the parameters
    XMn - matrix of type XM
    fOptimEn - maximum value of the matrix XMn
    Zn - matrix of type Z, i.e. the constraints
'''
return Zn[np.where(XMn == fOptimEn),:]
Z1 = z(n)
Z2 = z(l)
D = np.ones(n)
I1 = np.zeros(2**n-1)
#I1 = Z1[np.where(np.dot(Z1, D) == k), :]
for i in range(0,2**n-1):
if (np.dot(Z1[i,:],D) == k):
I1[i] = 1
X1 = np.dot(Z1, S)
XM1 = X1.transpose()*I1
XM1 = XM1.transpose()
fOptimE2 = XM1.max()
solE2 = sol(XM1, fOptimE2, Z1)
D2 = np.inner(A.transpose(),solE2)
###########
I2 = np.zeros((2**l-1))
for i in range(0, 2**l-1):
if ((Z2[i,0] <= D2[0]) and
(Z2[i,1] <= D2[1]) and
(Z2[i,2] <= D2[2]) and
(Z2[i,3] <= D2[3]) and
(Z2[i,4] <= D2[4]) and
(Z2[i,5] <= D2[5]) and
(Z2[i,6] <= D2[6]) and
(Z2[i,7] <= D2[7])):
I2[i] = 1
'''
for i in range(0,2**l-1):
if (Z2[i,].all() <= D2.all()):
I2[i] = 1
'''
# Filter from the objective-function vector X the combination that satisfies
# the constraint
X2 = np.inner(w,Z2)
XM2 = X2*I2
fOptimE3 = XM2.max()
solE3 = sol(XM2, fOptimE3, Z2)
print(solE2)
print(fOptimE2)
print(solE3)
print(fOptimE3)
|
py | b40fc116b2f019010ec72c35073982187c1c87e8 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ImportMode(str, Enum):
no_force = "NoForce"
force = "Force"
class SkuName(str, Enum):
classic = "Classic"
basic = "Basic"
standard = "Standard"
premium = "Premium"
class SkuTier(str, Enum):
classic = "Classic"
basic = "Basic"
standard = "Standard"
premium = "Premium"
class ProvisioningState(str, Enum):
creating = "Creating"
updating = "Updating"
deleting = "Deleting"
succeeded = "Succeeded"
failed = "Failed"
canceled = "Canceled"
class DefaultAction(str, Enum):
allow = "Allow"
deny = "Deny"
class Action(str, Enum):
allow = "Allow"
class PasswordName(str, Enum):
password = "password"
password2 = "password2"
class RegistryUsageUnit(str, Enum):
count = "Count"
bytes = "Bytes"
class PolicyStatus(str, Enum):
enabled = "enabled"
disabled = "disabled"
class TrustPolicyType(str, Enum):
notary = "Notary"
class WebhookStatus(str, Enum):
enabled = "enabled"
disabled = "disabled"
class WebhookAction(str, Enum):
push = "push"
delete = "delete"
quarantine = "quarantine"
chart_push = "chart_push"
chart_delete = "chart_delete"
class RunStatus(str, Enum):
queued = "Queued"
started = "Started"
running = "Running"
succeeded = "Succeeded"
failed = "Failed"
canceled = "Canceled"
error = "Error"
timeout = "Timeout"
class RunType(str, Enum):
quick_build = "QuickBuild"
quick_run = "QuickRun"
auto_build = "AutoBuild"
auto_run = "AutoRun"
class OS(str, Enum):
windows = "Windows"
linux = "Linux"
class Architecture(str, Enum):
amd64 = "amd64"
x86 = "x86"
arm = "arm"
class Variant(str, Enum):
v6 = "v6"
v7 = "v7"
v8 = "v8"
class TaskStatus(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class BaseImageDependencyType(str, Enum):
build_time = "BuildTime"
run_time = "RunTime"
class SourceControlType(str, Enum):
github = "Github"
visual_studio_team_service = "VisualStudioTeamService"
class TokenType(str, Enum):
pat = "PAT"
oauth = "OAuth"
class SourceTriggerEvent(str, Enum):
commit = "commit"
pullrequest = "pullrequest"
class TriggerStatus(str, Enum):
disabled = "Disabled"
enabled = "Enabled"
class BaseImageTriggerType(str, Enum):
all = "All"
runtime = "Runtime"
class SourceRegistryLoginMode(str, Enum):
none = "None"
default = "Default"
class SecretObjectType(str, Enum):
opaque = "Opaque"
|
py | b40fc1b55472dfed243803ac4e08cdf0ba8c0789 | # Copyright (c) 2018, NVIDIA CORPORATION.
import itertools
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import cudf
from cudf.core import DataFrame, Series
from cudf.tests.utils import assert_eq
_now = np.datetime64("now")
_tomorrow = _now + np.timedelta64(1, "D")
_now = np.int64(_now.astype("datetime64[ns]"))
_tomorrow = np.int64(_tomorrow.astype("datetime64[ns]"))
def make_frame(
dataframe_class,
nelem,
seed=0,
extra_levels=(),
extra_vals=(),
with_datetime=False,
):
np.random.seed(seed)
df = dataframe_class()
df["x"] = np.random.randint(0, 5, nelem)
df["y"] = np.random.randint(0, 3, nelem)
for lvl in extra_levels:
df[lvl] = np.random.randint(0, 2, nelem)
df["val"] = np.random.random(nelem)
for val in extra_vals:
df[val] = np.random.random(nelem)
if with_datetime:
df["datetime"] = np.random.randint(
_now, _tomorrow, nelem, dtype=np.int64
).astype("datetime64[ns]")
return df
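# Note added for clarity: make_frame builds a random frame with integer key
# columns "x" and "y" (plus any extra_levels), a float "val" column (plus any
# extra_vals) and, when requested, a "datetime" column drawn from the next 24
# hours; the tests below compare cudf and pandas groupby results on it.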
def get_nelem():
for elem in [2, 3, 1000]:
yield elem
@pytest.fixture
def gdf():
return DataFrame({"x": [1, 2, 3], "y": [0, 1, 1]})
@pytest.fixture
def pdf(gdf):
return gdf.to_pandas()
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_mean(nelem):
got_df = make_frame(DataFrame, nelem=nelem).groupby(["x", "y"]).mean()
expect_df = (
make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).mean()
)
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_mean_3level(nelem):
lvls = "z"
bys = list("xyz")
got_df = (
make_frame(DataFrame, nelem=nelem, extra_levels=lvls)
.groupby(bys)
.mean()
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem, extra_levels=lvls)
.groupby(bys)
.mean()
)
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_agg_mean_min(nelem):
got_df = (
make_frame(DataFrame, nelem=nelem)
.groupby(["x", "y"])
.agg(["mean", "min"])
)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem)
.groupby(["x", "y"])
.agg(["mean", "min"])
)
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_agg_min_max_dictargs(nelem):
expect_df = (
make_frame(pd.DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"])
.agg({"a": "min", "b": "max"})
)
got_df = (
make_frame(DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"])
.agg({"a": "min", "b": "max"})
)
assert_eq(expect_df, got_df)
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_agg_min_max_dictlist(nelem):
expect_df = (
make_frame(pd.DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"])
.agg({"a": ["min", "max"], "b": ["min", "max"]})
)
got_df = (
make_frame(DataFrame, nelem=nelem, extra_vals="ab")
.groupby(["x", "y"])
.agg({"a": ["min", "max"], "b": ["min", "max"]})
)
assert_eq(got_df, expect_df)
@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
@pytest.mark.parametrize("func", ["mean", "min", "max", "count", "sum"])
def test_groupby_2keys_agg(nelem, func):
# gdf (Note: lack of multindex)
expect_df = (
make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func)
)
got_df = make_frame(DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func)
check_dtype = False if func == "count" else True
assert_eq(got_df, expect_df, check_dtype=check_dtype)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_as_index_single_agg(pdf, gdf, as_index):
gdf = gdf.groupby("y", as_index=as_index).agg({"x": "mean"})
pdf = pdf.groupby("y", as_index=as_index).agg({"x": "mean"})
assert_eq(pdf, gdf)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_as_index_multiindex(pdf, gdf, as_index):
pdf = pd.DataFrame(
{"a": [1, 2, 1], "b": [3, 3, 3], "c": [2, 2, 3], "d": [3, 1, 2]}
)
gdf = cudf.from_pandas(pdf)
gdf = gdf.groupby(["a", "b"], as_index=as_index).agg({"c": "mean"})
pdf = pdf.groupby(["a", "b"], as_index=as_index).agg({"c": "mean"})
if as_index:
assert_eq(pdf, gdf)
else:
# column names don't match - check just the values
for gcol, pcol in zip(gdf, pdf):
assert_array_equal(gdf[gcol].to_array(), pdf[pcol].values)
def test_groupby_default(pdf, gdf):
gdf = gdf.groupby("y").agg({"x": "mean"})
pdf = pdf.groupby("y").agg({"x": "mean"})
assert_eq(pdf, gdf)
def test_group_keys_true(pdf, gdf):
gdf = gdf.groupby("y", group_keys=True).sum()
pdf = pdf.groupby("y", group_keys=True).sum()
assert_eq(pdf, gdf)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_getitem_getattr(as_index):
pdf = pd.DataFrame({"x": [1, 3, 1], "y": [1, 2, 3], "z": [1, 4, 5]})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.groupby("x")["y"].sum(), gdf.groupby("x")["y"].sum())
assert_eq(pdf.groupby("x").y.sum(), gdf.groupby("x").y.sum())
assert_eq(pdf.groupby("x")[["y"]].sum(), gdf.groupby("x")[["y"]].sum())
assert_eq(
pdf.groupby(["x", "y"], as_index=as_index).sum(),
gdf.groupby(["x", "y"], as_index=as_index).sum(),
)
def test_groupby_cats():
df = DataFrame()
df["cats"] = pd.Categorical(list("aabaacaab"))
df["vals"] = np.random.random(len(df))
cats = np.asarray(list(df["cats"]))
vals = df["vals"].to_array()
grouped = df.groupby(["cats"], as_index=False).mean()
got_vals = grouped["vals"]
got_cats = grouped["cats"]
for c, v in zip(got_cats, got_vals):
print(c, v)
expect = vals[cats == c].mean()
np.testing.assert_almost_equal(v, expect)
def test_groupby_iterate_groups():
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
def assert_values_equal(arr):
np.testing.assert_array_equal(arr[0], arr)
for name, grp in df.groupby(["key1", "key2"]):
pddf = grp.to_pandas()
for k in "key1,key2".split(","):
assert_values_equal(pddf[k].values)
def test_groupby_apply():
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
expect_grpby = df.to_pandas().groupby(["key1", "key2"], as_index=False)
got_grpby = df.groupby(["key1", "key2"])
def foo(df):
df["out"] = df["val1"] + df["val2"]
return df
expect = expect_grpby.apply(foo)
got = got_grpby.apply(foo)
assert_eq(expect, got)
def test_groupby_apply_grouped():
from numba import cuda
np.random.seed(0)
df = DataFrame()
nelem = 20
df["key1"] = np.random.randint(0, 3, nelem)
df["key2"] = np.random.randint(0, 2, nelem)
df["val1"] = np.random.random(nelem)
df["val2"] = np.random.random(nelem)
expect_grpby = df.to_pandas().groupby(["key1", "key2"], as_index=False)
got_grpby = df.groupby(["key1", "key2"])
def foo(key1, val1, com1, com2):
for i in range(cuda.threadIdx.x, len(key1), cuda.blockDim.x):
com1[i] = key1[i] * 10000 + val1[i]
com2[i] = i
got = got_grpby.apply_grouped(
foo,
incols=["key1", "val1"],
outcols={"com1": np.float64, "com2": np.int32},
tpb=8,
)
got = got.to_pandas()
# Get expected result by emulating the operation in pandas
def emulate(df):
df["com1"] = df.key1 * 10000 + df.val1
df["com2"] = np.arange(len(df), dtype=np.int32)
return df
expect = expect_grpby.apply(emulate)
expect = expect.sort_values(["key1", "key2"])
pd.util.testing.assert_frame_equal(expect, got)
@pytest.mark.parametrize("nelem", [100, 500])
@pytest.mark.parametrize(
"func", ["mean", "std", "var", "min", "max", "count", "sum"]
)
def test_groupby_cudf_2keys_agg(nelem, func):
got_df = make_frame(DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func)
# pandas
expect_df = (
make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func)
)
check_dtype = False if func == "count" else True
assert_eq(got_df, expect_df, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_series_groupby(agg):
s = pd.Series([1, 2, 3])
g = Series([1, 2, 3])
sg = s.groupby(s // 2)
gg = g.groupby(g // 2)
sa = getattr(sg, agg)()
ga = getattr(gg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(sa, ga, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_series_groupby_agg(agg):
s = pd.Series([1, 2, 3])
g = Series([1, 2, 3])
sg = s.groupby(s // 2).agg(agg)
gg = g.groupby(g // 2).agg(agg)
check_dtype = False if agg == "count" else True
assert_eq(sg, gg, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_groupby_level_zero(agg):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[0, 1, 1])
gdf = DataFrame.from_pandas(pdf)
pdg = pdf.groupby(level=0)
gdg = gdf.groupby(level=0)
pdresult = getattr(pdg, agg)()
gdresult = getattr(gdg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(pdresult, gdresult, check_dtype=check_dtype)
@pytest.mark.parametrize("agg", ["min", "max", "count", "sum", "mean"])
def test_groupby_series_level_zero(agg):
pdf = pd.Series([1, 2, 3], index=[0, 1, 1])
gdf = Series.from_pandas(pdf)
pdg = pdf.groupby(level=0)
gdg = gdf.groupby(level=0)
pdresult = getattr(pdg, agg)()
gdresult = getattr(gdg, agg)()
check_dtype = False if agg == "count" else True
assert_eq(pdresult, gdresult, check_dtype=check_dtype)
def test_groupby_column_name():
pdf = pd.DataFrame({"xx": [1.0, 2.0, 3.0], "yy": [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
g = gdf.groupby("yy")
p = pdf.groupby("yy")
gxx = g["xx"].sum()
pxx = p["xx"].sum()
assert_eq(pxx, gxx)
def test_groupby_column_numeral():
pdf = pd.DataFrame({0: [1.0, 2.0, 3.0], 1: [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
p = pdf.groupby(1)
g = gdf.groupby(1)
pxx = p[0].sum()
gxx = g[0].sum()
assert_eq(pxx, gxx)
pdf = pd.DataFrame({0.5: [1.0, 2.0, 3.0], 1.5: [1, 2, 3]})
gdf = DataFrame.from_pandas(pdf)
p = pdf.groupby(1.5)
g = gdf.groupby(1.5)
pxx = p[0.5].sum()
gxx = g[0.5].sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize(
"series",
[[0, 1, 0], [1, 1, 1], [0, 1, 1], [1, 2, 3], [4, 3, 2], [0, 2, 0]],
) # noqa: E501
def test_groupby_external_series(series):
pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
gdf = DataFrame.from_pandas(pdf)
pxx = pdf.groupby(pd.Series(series)).x.sum()
gxx = gdf.groupby(cudf.Series(series)).x.sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize("series", [[0.0, 1.0], [1.0, 1.0, 1.0, 1.0]])
def test_groupby_external_series_incorrect_length(series):
pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
gdf = DataFrame.from_pandas(pdf)
pxx = pdf.groupby(pd.Series(series)).x.sum()
gxx = gdf.groupby(cudf.Series(series)).x.sum()
assert_eq(pxx, gxx)
@pytest.mark.parametrize(
"level", [0, 1, "a", "b", [0, 1], ["a", "b"], ["a", 1], -1, [-1, -2]]
)
def test_groupby_levels(level):
idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (2, 2)], names=("a", "b"))
pdf = pd.DataFrame({"c": [1, 2, 3], "d": [2, 3, 4]}, index=idx)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.groupby(level=level).sum(), gdf.groupby(level=level).sum())
def test_advanced_groupby_levels():
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1], "z": [1, 1, 1]})
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
assert_eq(pdg, gdg)
pdh = pdg.groupby(level=1).sum()
gdh = gdg.groupby(level=1).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y", "z"]).sum()
gdg = gdf.groupby(["x", "y", "z"]).sum()
pdg = pdf.groupby(["z"]).sum()
gdg = gdf.groupby(["z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["y", "z"]).sum()
gdg = gdf.groupby(["y", "z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["x", "z"]).sum()
gdg = gdf.groupby(["x", "z"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["y"]).sum()
gdg = gdf.groupby(["y"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["x"]).sum()
gdg = gdf.groupby(["x"]).sum()
assert_eq(pdg, gdg)
pdh = pdg.groupby(level=0).sum()
gdh = gdg.groupby(level=0).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
pdh = pdg.groupby(level=[0, 1]).sum()
gdh = gdg.groupby(level=[0, 1]).sum()
assert_eq(pdh, gdh)
pdh = pdg.groupby(level=[1, 0]).sum()
gdh = gdg.groupby(level=[1, 0]).sum()
assert_eq(pdh, gdh)
pdg = pdf.groupby(["x", "y"]).sum()
gdg = gdf.groupby(["x", "y"]).sum()
with pytest.raises(IndexError) as raises:
pdh = pdg.groupby(level=2).sum()
raises.match("Too many levels")
with pytest.raises(IndexError) as raises:
gdh = gdg.groupby(level=2).sum()
# we use a different error message
raises.match("Invalid level number")
assert_eq(pdh, gdh)
@pytest.mark.parametrize(
"func",
[
pytest.param(
lambda df: df.groupby(["x", "y", "z"]).sum(),
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/32464"
),
),
lambda df: df.groupby(["x", "y"]).sum(),
lambda df: df.groupby(["x", "y"]).agg("sum"),
lambda df: df.groupby(["y"]).sum(),
lambda df: df.groupby(["y"]).agg("sum"),
lambda df: df.groupby(["x"]).sum(),
lambda df: df.groupby(["x"]).agg("sum"),
lambda df: df.groupby(["x", "y"]).z.sum(),
lambda df: df.groupby(["x", "y"]).z.agg("sum"),
],
)
def test_empty_groupby(func):
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
gdf = cudf.from_pandas(pdf)
assert_eq(func(pdf), func(gdf), check_index_type=False)
def test_groupby_unsupported_columns():
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(np.random.choice(["a", "b", 1], 3), dtype="category")
)
pdf = pd.DataFrame(
{
"x": [1, 2, 3],
"y": ["a", "b", "c"],
"z": ["d", "e", "f"],
"a": [3, 4, 5],
}
)
pdf["b"] = pd_cat
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby("x").sum()
gdg = gdf.groupby("x").sum()
assert_eq(pdg, gdg)
def test_list_of_series():
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1]})
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby([pdf.x]).y.sum()
gdg = gdf.groupby([gdf.x]).y.sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby([pdf.x, pdf.y]).y.sum()
gdg = gdf.groupby([gdf.x, gdf.y]).y.sum()
pytest.skip()
assert_eq(pdg, gdg)
def test_groupby_use_agg_column_as_index():
pdf = pd.DataFrame()
pdf["a"] = [1, 1, 1, 3, 5]
gdf = cudf.DataFrame()
gdf["a"] = [1, 1, 1, 3, 5]
pdg = pdf.groupby("a").agg({"a": "count"})
gdg = gdf.groupby("a").agg({"a": "count"})
assert_eq(pdg, gdg, check_dtype=False)
def test_groupby_list_then_string():
gdf = cudf.DataFrame()
gdf["a"] = [0, 1, 0, 1, 2]
gdf["b"] = [11, 2, 15, 12, 2]
gdf["c"] = [6, 7, 6, 7, 6]
pdf = gdf.to_pandas()
gdg = gdf.groupby("a", as_index=True).agg(
{"b": ["min", "max"], "c": "max"}
)
pdg = pdf.groupby("a", as_index=True).agg(
{"b": ["min", "max"], "c": "max"}
)
assert_eq(gdg, pdg)
def test_groupby_different_unequal_length_column_aggregations():
gdf = cudf.DataFrame()
gdf["a"] = [0, 1, 0, 1, 2]
gdf["b"] = [11, 2, 15, 12, 2]
gdf["c"] = [11, 2, 15, 12, 2]
pdf = gdf.to_pandas()
gdg = gdf.groupby("a", as_index=True).agg(
{"b": "min", "c": ["max", "min"]}
)
pdg = pdf.groupby("a", as_index=True).agg(
{"b": "min", "c": ["max", "min"]}
)
assert_eq(pdg, gdg)
def test_groupby_single_var_two_aggs():
gdf = cudf.DataFrame()
gdf["a"] = [0, 1, 0, 1, 2]
gdf["b"] = [11, 2, 15, 12, 2]
gdf["c"] = [11, 2, 15, 12, 2]
pdf = gdf.to_pandas()
gdg = gdf.groupby("a", as_index=True).agg({"b": ["min", "max"]})
pdg = pdf.groupby("a", as_index=True).agg({"b": ["min", "max"]})
assert_eq(pdg, gdg)
def test_groupby_double_var_two_aggs():
gdf = cudf.DataFrame()
gdf["a"] = [0, 1, 0, 1, 2]
gdf["b"] = [11, 2, 15, 12, 2]
gdf["c"] = [11, 2, 15, 12, 2]
pdf = gdf.to_pandas()
gdg = gdf.groupby(["a", "b"], as_index=True).agg({"c": ["min", "max"]})
pdg = pdf.groupby(["a", "b"], as_index=True).agg({"c": ["min", "max"]})
assert_eq(pdg, gdg)
def test_groupby_apply_basic_agg_single_column():
gdf = DataFrame()
gdf["key"] = [0, 0, 1, 1, 2, 2, 0]
gdf["val"] = [0, 1, 2, 3, 4, 5, 6]
gdf["mult"] = gdf["key"] * gdf["val"]
pdf = gdf.to_pandas()
gdg = gdf.groupby(["key", "val"]).mult.sum()
pdg = pdf.groupby(["key", "val"]).mult.sum()
assert_eq(pdg, gdg)
def test_groupby_multi_agg_single_groupby_series():
pdf = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby("x").y.agg(["sum", "max"])
gdg = gdf.groupby("x").y.agg(["sum", "max"])
assert_eq(pdg, gdg)
def test_groupby_multi_agg_multi_groupby():
pdf = pd.DataFrame(
{
"a": np.random.randint(0, 5, 10),
"b": np.random.randint(0, 5, 10),
"c": np.random.randint(0, 5, 10),
"d": np.random.randint(0, 5, 10),
}
)
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby(["a", "b"]).agg(["sum", "max"])
gdg = gdf.groupby(["a", "b"]).agg(["sum", "max"])
assert_eq(pdg, gdg)
def test_groupby_datetime_multi_agg_multi_groupby():
from datetime import datetime, timedelta
pdf = pd.DataFrame(
{
"a": pd.date_range(
datetime.now(), datetime.now() + timedelta(9), freq="D"
),
"b": np.random.randint(0, 5, 10),
"c": np.random.randint(0, 5, 10),
"d": np.random.randint(0, 5, 10),
}
)
gdf = cudf.from_pandas(pdf)
pdg = pdf.groupby(["a", "b"]).agg(["sum", "max"])
gdg = gdf.groupby(["a", "b"]).agg(["sum", "max"])
assert_eq(pdg, gdg)
@pytest.mark.parametrize("agg", ["min", "max", "sum", "count", "mean"])
def test_groupby_nulls_basic(agg):
check_dtype = False if agg == "count" else True
pdf = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": [1, 2, 1, 2, 1, None]})
gdf = cudf.from_pandas(pdf)
assert_eq(
getattr(pdf.groupby("a"), agg)(),
getattr(gdf.groupby("a"), agg)(),
check_dtype=check_dtype,
)
pdf = pd.DataFrame(
{
"a": [0, 0, 1, 1, 2, 2],
"b": [1, 2, 1, 2, 1, None],
"c": [1, 2, 1, None, 1, 2],
}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
getattr(pdf.groupby("a"), agg)(),
getattr(gdf.groupby("a"), agg)(),
check_dtype=check_dtype,
)
pdf = pd.DataFrame(
{
"a": [0, 0, 1, 1, 2, 2],
"b": [1, 2, 1, 2, 1, None],
"c": [1, 2, None, None, 1, 2],
}
)
gdf = cudf.from_pandas(pdf)
# TODO: fillna() used here since we don't follow
# Pandas' null semantics. Should we change it?
assert_eq(
getattr(pdf.groupby("a"), agg)().fillna(0),
getattr(gdf.groupby("a"), agg)().fillna(0),
check_dtype=check_dtype,
)
def test_groupby_nulls_in_index():
pdf = pd.DataFrame({"a": [None, 2, 1, 1], "b": [1, 2, 3, 4]})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.groupby("a").sum(), gdf.groupby("a").sum())
def test_groupby_all_nulls_index():
gdf = cudf.DataFrame(
{
"a": cudf.Series([None, None, None, None], dtype="object"),
"b": [1, 2, 3, 4],
}
)
pdf = gdf.to_pandas()
assert_eq(pdf.groupby("a").sum(), gdf.groupby("a").sum())
gdf = cudf.DataFrame(
{"a": cudf.Series([np.nan, np.nan, np.nan, np.nan]), "b": [1, 2, 3, 4]}
)
pdf = gdf.to_pandas()
assert_eq(pdf.groupby("a").sum(), gdf.groupby("a").sum())
def test_groupby_sort():
pdf = pd.DataFrame({"a": [2, 2, 1, 1], "b": [1, 2, 3, 4]})
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby("a", sort=False).sum().sort_index(),
gdf.groupby("a", sort=False).sum().sort_index(),
)
pdf = pd.DataFrame(
{"c": [-1, 2, 1, 4], "b": [1, 2, 3, 4], "a": [2, 2, 1, 1]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby(["c", "b"], sort=False).sum().sort_index(),
gdf.groupby(["c", "b"], sort=False).sum().to_pandas().sort_index(),
)
def test_groupby_cat():
pdf = pd.DataFrame(
{"a": [1, 1, 2], "b": pd.Series(["b", "b", "a"], dtype="category")}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby("a").count(), gdf.groupby("a").count(), check_dtype=False
)
def test_groupby_index_type():
df = cudf.DataFrame()
df["string_col"] = ["a", "b", "c"]
df["counts"] = [1, 2, 3]
res = df.groupby(by="string_col").counts.sum()
assert isinstance(res.index, cudf.core.index.StringIndex)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize("q", [0.25, 0.4, 0.5, 0.7, 1])
def test_groupby_quantile(interpolation, q):
raw_data = {
"y": [None, 1, 2, 3, 4, None, 6, 7, 8, 9],
"x": [1, 2, 3, 1, 2, 2, 1, None, 3, 2],
}
# Pandas>0.25 now casts NaN in quantile operations as a float64
# # so we are filling with zeros.
pdf = pd.DataFrame(raw_data).fillna(0)
gdf = DataFrame.from_pandas(pdf)
pdg = pdf.groupby("x")
gdg = gdf.groupby("x")
pdresult = pdg.quantile(q, interpolation=interpolation)
gdresult = gdg.quantile(q, interpolation=interpolation)
# There's a lot left to add to python bindings like index name
# so this is a temporary workaround
pdresult = pdresult["y"].reset_index(drop=True)
gdresult = gdresult["y"].reset_index(drop=True)
if q == 0.5 and interpolation == "nearest":
pytest.xfail(
"Pandas NaN Rounding will fail nearest interpolation at 0.5"
)
assert_eq(pdresult, gdresult)
def test_groupby_std():
raw_data = {
"x": [1, 2, 3, 1, 2, 2, 1, None, 3, 2],
"y": [None, 1, 2, 3, 4, None, 6, 7, 8, 9],
}
pdf = pd.DataFrame(raw_data)
gdf = DataFrame.from_pandas(pdf)
pdg = pdf.groupby("x")
gdg = gdf.groupby("x")
pdresult = pdg.std()
gdresult = gdg.std()
# There's a lot left to add to python bindings like index name
# so this is a temporary workaround
pdresult = pdresult["y"].reset_index(drop=True)
gdresult = gdresult["y"].reset_index(drop=True)
assert_eq(pdresult, gdresult)
def test_groupby_size():
pdf = pd.DataFrame(
{
"a": [1, 1, 3, 4],
"b": ["bob", "bob", "alice", "cooper"],
"c": [1, 2, 3, 4],
}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby("a").size(), gdf.groupby("a").size(), check_dtype=False
)
assert_eq(
pdf.groupby(["a", "b", "c"]).size(),
gdf.groupby(["a", "b", "c"]).size(),
check_dtype=False,
)
sr = pd.Series(range(len(pdf)))
assert_eq(
pdf.groupby(sr).size(), gdf.groupby(sr).size(), check_dtype=False
)
@pytest.mark.parametrize("nelem", get_nelem())
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize("agg", ["min", "max", "mean", "count"])
def test_groupby_datetime(nelem, as_index, agg):
if agg == "mean" and as_index is True:
return
check_dtype = agg not in ("mean", "count")
pdf = make_frame(pd.DataFrame, nelem=nelem, with_datetime=True)
gdf = make_frame(cudf.DataFrame, nelem=nelem, with_datetime=True)
pdg = pdf.groupby("datetime", as_index=as_index)
gdg = gdf.groupby("datetime", as_index=as_index)
if as_index is False:
pdres = getattr(pdg, agg)()
gdres = getattr(gdg, agg)()
else:
pdres = pdg.agg({"datetime": agg})
gdres = gdg.agg({"datetime": agg})
assert_eq(pdres, gdres, check_dtype=check_dtype)
def test_groupby_dropna():
df = cudf.DataFrame({"a": [1, 1, None], "b": [1, 2, 3]})
expect = cudf.DataFrame(
{"b": [3, 3]}, index=cudf.Series([1, None], name="a")
)
got = df.groupby("a", dropna=False).sum()
assert_eq(expect, got)
df = cudf.DataFrame(
{"a": [1, 1, 1, None], "b": [1, None, 1, None], "c": [1, 2, 3, 4]}
)
idx = cudf.MultiIndex.from_frame(
df[["a", "b"]].drop_duplicates().sort_values(["a", "b"]),
names=["a", "b"],
)
expect = cudf.DataFrame({"c": [4, 2, 4]}, index=idx)
got = df.groupby(["a", "b"], dropna=False).sum()
assert_eq(expect, got)
def test_groupby_dropna_getattr():
df = cudf.DataFrame()
df["id"] = [0, 1, 1, None, None, 3, 3]
df["val"] = [0, 1, 1, 2, 2, 3, 3]
got = df.groupby("id", dropna=False).val.sum()
expect = cudf.Series(
[0, 2, 6, 4], name="val", index=cudf.Series([0, 1, 3, None], name="id")
)
assert_eq(expect, got)
def test_groupby_categorical_from_string():
gdf = cudf.DataFrame()
gdf["id"] = ["a", "b", "c"]
gdf["val"] = [0, 1, 2]
gdf["id"] = gdf["id"].astype("category")
assert_eq(
cudf.DataFrame({"val": gdf["val"]}).set_index(index=gdf["id"]),
gdf.groupby("id").sum(),
)
def test_groupby_arbitrary_length_series():
gdf = cudf.DataFrame({"a": [1, 1, 2], "b": [2, 3, 4]}, index=[4, 5, 6])
gsr = cudf.Series([1.0, 2.0, 2.0], index=[3, 4, 5])
pdf = gdf.to_pandas()
psr = gsr.to_pandas()
expect = pdf.groupby(psr).sum()
got = gdf.groupby(gsr).sum()
assert_eq(expect, got)
def test_groupby_series_same_name_as_dataframe_column():
gdf = cudf.DataFrame({"a": [1, 1, 2], "b": [2, 3, 4]}, index=[4, 5, 6])
gsr = cudf.Series([1.0, 2.0, 2.0], name="a", index=[3, 4, 5])
pdf = gdf.to_pandas()
psr = gsr.to_pandas()
expect = pdf.groupby(psr).sum()
got = gdf.groupby(gsr).sum()
assert_eq(expect, got)
def test_group_by_series_and_column_name_in_by():
gdf = cudf.DataFrame(
{"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]}, index=[1, 2, 3]
)
gsr0 = cudf.Series([0.0, 1.0, 2.0], name="a", index=[1, 2, 3])
gsr1 = cudf.Series([0.0, 1.0, 3.0], name="b", index=[3, 4, 5])
pdf = gdf.to_pandas()
psr0 = gsr0.to_pandas()
psr1 = gsr1.to_pandas()
expect = pdf.groupby(["x", psr0, psr1]).sum()
got = gdf.groupby(["x", gsr0, gsr1]).sum()
assert_eq(expect, got)
@pytest.mark.parametrize(
"grouper",
[
"a",
["a"],
["a", "b"],
np.array([0, 1, 1, 2, 3, 2]),
{0: "a", 1: "a", 2: "b", 3: "a", 4: "b", 5: "c"},
lambda x: x + 1,
["a", np.array([0, 1, 1, 2, 3, 2])],
],
)
def test_grouping(grouper):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 3],
"b": [1, 2, 1, 2, 1, 2],
"c": [1, 2, 3, 4, 5, 6],
}
)
gdf = cudf.from_pandas(pdf)
for pdf_group, gdf_group in zip(
pdf.groupby(grouper), gdf.groupby(grouper)
):
assert pdf_group[0] == gdf_group[0]
assert_eq(pdf_group[1], gdf_group[1])
@pytest.mark.parametrize("agg", [lambda x: x.count(), "count"])
@pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]])
def test_groupby_count(agg, by):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]}
)
gdf = cudf.from_pandas(pdf)
expect = pdf.groupby(by).agg(agg)
got = gdf.groupby(by).agg(agg)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("agg", [lambda x: x.median(), "median"])
@pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]])
def test_groupby_median(agg, by):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]}
)
gdf = cudf.from_pandas(pdf)
expect = pdf.groupby(by).agg(agg)
got = gdf.groupby(by).agg(agg)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("agg", [lambda x: x.nunique(), "nunique"])
@pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]])
def test_groupby_nunique(agg, by):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]}
)
gdf = cudf.from_pandas(pdf)
expect = pdf.groupby(by).nunique()
got = gdf.groupby(by).nunique()
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"n", [0, 1, 2, 10],
)
@pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]])
def test_groupby_nth(n, by):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 3],
"b": [1, 2, 2, 2, 1],
"c": [1, 2, None, 4, 5],
"d": ["a", "b", "c", "d", "e"],
}
)
gdf = cudf.from_pandas(pdf)
expect = pdf.groupby(by).nth(n)
got = gdf.groupby(by).nth(n)
assert_eq(expect, got, check_dtype=False)
def test_raise_data_error():
pdf = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]})
gdf = cudf.from_pandas(pdf)
# we have to test that Pandas does this too:
try:
pdf.groupby("a").mean()
except Exception as e:
typ = type(e)
msg = str(e)
with pytest.raises(typ, match=msg):
gdf.groupby("a").mean()
def test_drop_unsupported_multi_agg():
gdf = cudf.DataFrame(
{"a": [1, 1, 2, 2], "b": [1, 2, 3, 4], "c": ["a", "b", "c", "d"]}
)
assert_eq(
gdf.groupby("a").agg(["count", "mean"]),
gdf.groupby("a").agg({"b": ["count", "mean"], "c": ["count"]}),
)
@pytest.mark.parametrize(
"agg",
(
list(itertools.combinations(["count", "max", "min", "nunique"], 2))
+ [
{"b": "min", "c": "mean"},
{"b": "max", "c": "mean"},
{"b": "count", "c": "mean"},
{"b": "nunique", "c": "mean"},
]
),
)
def test_groupby_agg_combinations(agg):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3],
"b": ["a", "a", "b", "c", "d"],
"c": [1, 2, 3, 4, 5],
}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby("a").agg(agg), gdf.groupby("a").agg(agg), check_dtype=False
)
def test_groupby_apply_noempty_group():
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2], "b": [1, 2, 1, 2], "c": [1, 2, 3, 4]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.groupby("a")
.apply(lambda x: x.iloc[[0, 1]])
.reset_index(drop=True),
gdf.groupby("a")
.apply(lambda x: x.iloc[[0, 1]])
.reset_index(drop=True),
)
|
py | b40fc1d19d0be161f1e1472bf20e1413542e0daa | import os
from flask import Flask, flash, request, redirect, url_for, render_template, session
from flask_session import Session  # Flask itself does not export Session; the Flask-Session extension provides it
import time
import facerecognition as face
UPLOAD_FOLDER = 'uploads'
app = Flask(__name__)
sess = Session()
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
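# Cap request bodies at 1 MB; Flask rejects larger uploads with a 413 error.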
def detections():
return face.run()
@app.route('/', methods=['GET', 'POST'])
def hello_world():
if request.method == 'GET':
return render_template('index.html')
if request.method == 'POST':
if 'file' not in request.files:
flash('File not uploaded')
return render_template('index.html')
file = request.files['file']
if file.filename == '':
flash('No selected file')
return render_template('index.html')
        file.filename = 'sample.jpg'
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'sample.jpg'))
return render_template('result.html', result=detections())
if __name__ == '__main__':
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
sess.init_app(app)
app.debug = True
app.run() |
py | b40fc3a4b96a72376e44d0856d71a3e1c9d4713c | import datetime as dt
def dt_to_str(dt_seconds):
"""
Converts delta time into string "hh:mm:ss"
"""
return str(dt.timedelta(seconds=dt_seconds))
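# Illustrative self-check (added sketch): datetime.timedelta renders as "H:MM:SS",
# so 3661 seconds formats as "1:01:01".
if __name__ == "__main__":
    assert dt_to_str(3661) == "1:01:01"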
|
py | b40fc3e6bf0be1947feda123c980ce803e69257a | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage operator."""
import warnings
from typing import Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
WILDCARD = '*'
class GCSToGCSOperator(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:type source_bucket: str
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:type source_object: str
:param source_objects: A list of source name of the objects to copy in the Google cloud
storage bucket. (templated)
:type source_objects: List[str]
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:type destination_bucket: str
:param destination_object: The destination name of the object in the
destination Google Cloud Storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
The same thing applies to source objects inside source_objects.
:type destination_object: str
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:type move_object: bool
:param replace: Whether you want to replace existing destination files or not.
:type replace: bool
:param delimiter: This is used to restrict the result to only the 'files' in a given 'folder'.
If source_objects = ['foo/bah/'] and delimiter = '.avro', then only the 'files' in the
folder 'foo/bah/' with '.avro' delimiter will be copied to the destination object.
:type delimiter: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param last_modified_time: When specified, the objects will be copied or moved,
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type last_modified_time: datetime.datetime
:param maximum_modified_time: When specified, the objects will be copied or moved,
only if they were modified before maximum_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type maximum_modified_time: datetime.datetime
:param is_older_than: When specified, the objects will be copied if they are older
than the specified time in seconds.
:type is_older_than: int
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GCSToGCSOperator(
task_id='copy_single_file',
source_bucket='data',
source_objects=['sales/sales-2017/january.avro'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/january-backup.avro',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_objects=['sales/sales-2017'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
            delimiter='.avro',
gcp_conn_id=google_cloud_conn_id
)
Or ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2019``
    and ``sales/sales-2020`` folders in the ``data`` bucket to the same folder in the
``data_backup`` bucket, deleting the original files in the process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_objects=['sales/sales-2019/*.avro', 'sales/sales-2020'],
destination_bucket='data_backup',
delimiter='.avro',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
"""
template_fields = (
'source_bucket',
'source_object',
'source_objects',
'destination_bucket',
'destination_object',
'delimiter',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
*, # pylint: disable=too-many-arguments
source_bucket,
source_object=None,
source_objects=None,
destination_bucket=None,
destination_object=None,
delimiter=None,
move_object=False,
replace=True,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
last_modified_time=None,
maximum_modified_time=None,
is_older_than=None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
):
super().__init__(**kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=3,
)
gcp_conn_id = google_cloud_storage_conn_id
self.source_bucket = source_bucket
self.source_object = source_object
self.source_objects = source_objects
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.delimiter = delimiter
self.move_object = move_object
self.replace = replace
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.last_modified_time = last_modified_time
self.maximum_modified_time = maximum_modified_time
self.is_older_than = is_older_than
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
if self.source_objects and self.source_object:
error_msg = (
"You can either set source_object parameter or source_objects "
"parameter but not both. Found source_object={} and"
" source_objects={}".format(self.source_object, self.source_objects)
)
raise AirflowException(error_msg)
if not self.source_object and not self.source_objects:
error_msg = "You must set source_object parameter or source_objects parameter. None set"
raise AirflowException(error_msg)
if self.source_objects and not all(isinstance(item, str) for item in self.source_objects):
raise AirflowException('At least, one of the `objects` in the `source_objects` is not a string')
# If source_object is set, default it to source_objects
if self.source_object:
self.source_objects = [self.source_object]
if self.destination_bucket is None:
self.log.warning(
'destination_bucket is None. Defaulting it to source_bucket (%s)', self.source_bucket
)
self.destination_bucket = self.source_bucket
# An empty source_object means to copy all files
if len(self.source_objects) == 0:
self.source_objects = ['']
# Raise exception if empty string `''` is used twice in source_object, this is to avoid double copy
if self.source_objects.count('') > 1:
raise AirflowException("You can't have two empty strings inside source_object")
# Iterate over the source_objects and do the copy
for prefix in self.source_objects:
# Check if prefix contains wildcard
if WILDCARD in prefix:
self._copy_source_with_wildcard(hook=hook, prefix=prefix)
# Now search with prefix using provided delimiter if any
else:
self._copy_source_without_wildcard(hook=hook, prefix=prefix)
def _copy_source_without_wildcard(self, hook, prefix):
"""
For source_objects with no wildcard, this operator would first list
all files in source_objects, using provided delimiter if any. Then copy
files from source_objects to destination_object and rename each source
file.
Example 1:
        The following Operator would copy all the files from the ``a/`` folder
        (i.e. a/a.csv, a/b.csv, a/c.csv) in the ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
gcp_conn_id=google_cloud_conn_id
)
Example 2:
        The following Operator would copy all Avro files from the ``a/`` folder
        (i.e. a/a.avro, a/b.avro, a/c.avro) in the ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
delimiter='.avro',
gcp_conn_id=google_cloud_conn_id
)
"""
objects = hook.list(self.source_bucket, prefix=prefix, delimiter=self.delimiter)
# If objects is empty and we have prefix, let's check if prefix is a blob
# and copy directly
if len(objects) == 0 and prefix:
if hook.exists(self.source_bucket, prefix):
self._copy_single_object(
hook=hook, source_object=prefix, destination_object=self.destination_object
)
for source_obj in objects:
if self.destination_object is None:
destination_object = source_obj
else:
destination_object = source_obj.replace(prefix, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_obj, destination_object=destination_object
)
def _copy_source_with_wildcard(self, hook, prefix):
total_wildcards = prefix.count(WILDCARD)
if total_wildcards > 1:
error_msg = (
"Only one wildcard '*' is allowed in source_object parameter. "
"Found {} in {}.".format(total_wildcards, prefix)
)
raise AirflowException(error_msg)
self.log.info('Delimiter ignored because wildcard is in prefix')
prefix_, delimiter = prefix.split(WILDCARD, 1)
objects = hook.list(self.source_bucket, prefix=prefix_, delimiter=delimiter)
if not self.replace:
# If we are not replacing, list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
existing_objects = hook.list(self.destination_bucket, prefix=prefix_, delimiter=delimiter)
objects = set(objects) - set(existing_objects)
if len(objects) > 0:
self.log.info('%s files are going to be synced: %s.', len(objects), objects)
else:
self.log.info('There are no new files to sync. Have a nice day!')
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix_, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_single_object(self, hook, source_object, destination_object):
if self.is_older_than:
# Here we check if the given object is older than the given time
# If given, last_modified_time and maximum_modified_time is ignored
if hook.is_older_than(self.source_bucket, source_object, self.is_older_than):
self.log.info("Object is older than %s seconds ago", self.is_older_than)
else:
self.log.debug("Object is not older than %s seconds ago", self.is_older_than)
return
elif self.last_modified_time and self.maximum_modified_time:
# check to see if object was modified between last_modified_time and
# maximum_modified_time
if hook.is_updated_between(
self.source_bucket, source_object, self.last_modified_time, self.maximum_modified_time
):
self.log.info(
"Object has been modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
else:
self.log.debug(
"Object was not modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
return
elif self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket, source_object, self.last_modified_time):
self.log.info("Object has been modified after %s ", self.last_modified_time)
else:
self.log.debug("Object was not modified after %s ", self.last_modified_time)
return
elif self.maximum_modified_time is not None:
# Check to see if object was modified before maximum_modified_time
if hook.is_updated_before(self.source_bucket, source_object, self.maximum_modified_time):
self.log.info("Object has been modified before %s ", self.maximum_modified_time)
else:
self.log.debug("Object was not modified before %s ", self.maximum_modified_time)
return
self.log.info(
'Executing copy of gs://%s/%s to gs://%s/%s',
self.source_bucket,
source_object,
self.destination_bucket,
destination_object,
)
hook.rewrite(self.source_bucket, source_object, self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
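# Illustrative note (added sketch, not part of the operator's API): for a wildcard
# source such as "sales/sales-2017/*.avro" with destination_object="copied_sales/2017/",
# the destination name is derived exactly as in _copy_source_with_wildcard above:
#     "sales/sales-2017/january.avro".replace("sales/sales-2017/", "copied_sales/2017/", 1)
# which yields "copied_sales/2017/january.avro".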
|
py | b40fc5199cdcf9530eaf04a46fcf29e12ce6b526 | import tensorflow as tf
import numpy as np
import mnist_data
import vae
""" parameters """
model_no = '299'
IMAGE_SIZE_MNIST = 28
n_hidden = 500
dim_img = IMAGE_SIZE_MNIST**2 # number of pixels for a MNIST image
dim_z = 10
""" build graph """
# input placeholders
x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
y = tf.placeholder(tf.float32, shape=[None, mnist_data.NUM_LABELS], name='target_labels')
# dropout
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# network architecture
rec_loss = vae.autoencoder_rec_loss(x, y, dim_img, dim_z, n_hidden, keep_prob)
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver = tf.train.import_meta_graph('models/mnist_gan.ckpt-'+model_no+'.meta')
saver.restore(sess, './models/mnist_gan.ckpt-'+model_no)
def OneHot(X, n=10, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
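# Illustrative example (added note): OneHot([2, 0], n=4) returns
#     [[0., 0., 1., 0.],
#      [1., 0., 0., 0.]]
# i.e. one row per label, with 1. in the label's column and negative_class elsewhere.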
def compute_avg_rec_error(x_sample, y_sample, repeats, n=3):
y_sample = OneHot(y_sample)
    x_repeated = np.repeat([x_sample], repeats, axis=0)
    y_repeated = np.repeat(y_sample, repeats, axis=0)
avg_dist = 0.0
for i in range(n):
avg_dist = avg_dist + sess.run(rec_loss, feed_dict={x: x_repeated, y: y_repeated, keep_prob : 1})
return avg_dist/n |
py | b40fc528f090ca0b2c95ff898aeb4006e0c34dca | from model.project import Project
def test_add_project(app):
app.session.login("administrator", "root")
old_projects = app.project.get_project_list()
project = Project(name=app.project.random_name(), description=app.project.random_description())
app.project.add(project)
new_projects = app.project.get_project_list()
assert len(old_projects) + 1 == len(new_projects)
old_projects.append(project)
assert sorted(old_projects, key=Project.name) == sorted(new_projects, key=Project.name)
assert app.soap.is_project_added("administrator", "root", project)
|
py | b40fc54ec3289c3d9bdf30f389975331d72d1b4e | from migen import *
from migen.genlib.io import CRG
from targets import *
from litex.build.generic_platform import *
from litex.build.xilinx.platform import XilinxPlatform
from litex.soc.integration.soc_core import SoCCore
from litex.soc.cores.uart import UARTWishboneBridge
from litescope import LiteScopeAnalyzer
_io = [
("sys_clock", 0, Pins(1)),
("sys_reset", 1, Pins(1)),
("serial", 0,
Subsignal("tx", Pins(1)),
Subsignal("rx", Pins(1)),
),
("bus", 0, Pins(128))
]
class CorePlatform(XilinxPlatform):
name = "core"
default_clk_name = "sys_clk"
def __init__(self):
XilinxPlatform.__init__(self, "", _io)
def do_finalize(self, *args, **kwargs):
pass
class Core(SoCCore):
platform = CorePlatform()
csr_map = {
"analyzer": 16
}
csr_map.update(SoCCore.csr_map)
def __init__(self, platform, clk_freq=100*1000000):
self.clock_domains.cd_sys = ClockDomain("sys")
self.comb += [
self.cd_sys.clk.eq(platform.request("sys_clock")),
self.cd_sys.rst.eq(platform.request("sys_reset"))
]
SoCCore.__init__(self, platform, clk_freq,
cpu_type=None,
csr_data_width=32,
with_uart=False,
ident="Litescope example design",
with_timer=False
)
self.add_cpu_or_bridge(UARTWishboneBridge(platform.request("serial"), clk_freq, baudrate=115200))
self.add_wb_master(self.cpu_or_bridge.wishbone)
self.bus = platform.request("bus")
self.submodules.analyzer = LiteScopeAnalyzer((self.bus), 512)
default_subtarget = Core
|
py | b40fc6513a45099af910e2cb6cb717e3c7045f0f | #!/usr/bin/env python
import os, sys, argparse, logging, json
from tempfile import NamedTemporaryFile
from collections import namedtuple
from pbsuite.utils.setupLogging import setupLogging
from pbsuite.utils.FileHandlers import FastqFile, FastaFile, M5File, M4File, revComp
from pbsuite.jelly.Support import AlignmentConnector, SUPPORTFLAGS
from pbsuite.banana.Polish import *
import pbsuite.jelly.m4pie as m4pie
ALLTEMPFILES = []
MINTAIL = 200
GAPWIGGLE = 400 # max deviation from gapsize a span seq's fill can be
# Help text for the argument parser in parseArgs()
USAGE = "Assess a gap's local assembly directory and try to build a sequence that fills it"
def blasr(query, target, fmt="5", bestn=20, nCandidates=20, nproc = 1, outname = "out.m5"):
"""
Simple overlapper
"""
c = ("blasr %s %s -m %s --bestn %d --nCandidates %d --minMatch 8 --sdpTupleSize 6 --affineAlign "
"--nproc %d --noSplitSubreads --out %s --minPctSimilarity 60 --minReadLength 5") % \
(query, target, fmt, bestn, nCandidates, nproc, outname)
logging.debug(c)
r,o,e = exe(c)
logging.debug("blasr - %d - %s - %s" % (r, o, e))
def tailblasr(query, target, nproc=1, outname="out.m5", basedir="./"):
"""
Try getting the read to hit each target uniquely instead of hoping that bestn reports all possible alignments
"""
global ALLTEMPFILES
#input reads
reads = FastqFile(query)
#map to make the primary
primary= NamedTemporaryFile(prefix="primary_", suffix=".m4", delete=False, dir=basedir)
primary = primary.name
ALLTEMPFILES.append(primary)
blasr(query, target, fmt="4", nproc=nproc, bestn=1, outname=primary)
#build command to call m4pie
args = "%s %s %s -t %d -n %d -o %s" % (primary, query, target, MINTAIL, nproc, outname)
args = args.split()
m4pie.run(args)
def oldtails():
aligns = M5File(primary)
#where I'm putting the good hits
mapOut = open(outname, "w")
#where I'm putting the tails
tfq = NamedTemporaryFile(prefix="tails_", suffix=".fastq", delete=False, dir=basedir)
ALLTEMPFILES.append( tfq.name )
whichEnd = defaultdict(list)
#extract the tails
ntails = 0
for a in aligns:
if a.qstart >= MINTAIL:
tseq1 = reads[a.qname].subSeq(None, a.qstart)
#prolog
tseq1.name = "%s_::_5_::_%d,%d" % (tseq1.name, a.qstart, a.qseqlength)
tfq.write(str(tseq1))
ntails += 1
if a.qend - a.qseqlength > MINTAIL:
tseq2 = reads[a.qname].subSeq(a.qend, None)
#epilog
tseq2.name = "%s_::_3_::_%d,%d" % (tseq2.name, a.qend, a.qseqlength)
tfq.write(str(tseq2))
ntails += 1
mapOut.write(str(a)+"\n")
#don't want redundant hits on a single flank
whichEnd[a.qname].append(a.tname)
tfq.close()
logging.info("%d unmapped tails" % (ntails))
#map tails
tailAlign = NamedTemporaryFile(prefix="tails_", suffix=".m5", delete=False, dir=basedir)
tailAlign = tailAlign.name
ALLTEMPFILES.append(tailAlign)
blasr(tfq.name, target, nproc=nproc, bestn=1, outname=tailAlign)
aligns2 = M5File(tailAlign)
logging.info("%d tails mapped" % len(aligns2))
for a in aligns2:
#get the carryon info
name, direct, se = a.qname.split("_::_")
pos, length = map(int, se.split(','))
#correct it's information
a.qname = name
a.qseqlength = length
#prevent redundant flank map
if a.tname in whichEnd[a.qname]:
logging.info("%s failed ref map" % a.tname)
continue
whichEnd[a.qname].append(a.tname)
#epilogs need to be updated
if direct == '3':
a.qstart += pos
a.qend += pos
mapOut.write(str(a)+"\n")
mapOut.close()
return
def extractFlanks(reads, basedir="./"):
"""
    Takes a FastqFile and separates the reference reads (^ref)
from the supporting reads
returns queryFileName, targetFileName
"""
global ALLTEMPFILES
query = NamedTemporaryFile(prefix="query_", suffix=".fastq", delete=False, dir=basedir)
ALLTEMPFILES.append(query.name)
target = NamedTemporaryFile(prefix="target_", suffix=".fasta", delete=False, dir=basedir)
ALLTEMPFILES.append(target.name)
for read in reads:
if read.startswith("ref"):
target.write(">%s\n%s\n" % (read, reads[read].seq))
else:
query.write(reads[read].toString())
query.close()
target.close()
return query.name, target.name
def orderSeeds(seedNames):
"""
Looks at the seed's names to figure out
which one is upstream of the next and if alignments
should be on the same strand
"""
if len(seedNames) == 1:
seedNames.append(None)
return True, seedNames
seed1, seed2 = seedNames
logging.debug("Ordering %s and %s" % (seed1, seed2))
    if seed1 is None:
logging.error("Seed1 must be non-None to AssessAssembly!")
exit(5)
#I will be returning a None, just need to know
#if seed1 is trying to extend 5' or 3'
    if seed2 is None:
sameStrand = True
if seed1.endswith("e3"):
ret = (None, seed1)
elif seed1.endswith("e5"):
ret = (seed1, None)
elif seed1.endswith("e3") and seed2.endswith("e5"):
sameStrand = True
ret = (seed1, seed2)
elif seed1.endswith("e5") and seed2.endswith("e3"):
sameStrand = True
ret = (seed2, seed1)
else:
#if seed1.endswith("e5") and seed2.endswith("e5"):
#if seed1.endswith("e3") and seed2.endswith("e3"):
#No way to know. Someone is reverse-compliment of
#the other. -- One needs to be on the opposite Strand
sameStrand = False
ret = (seed1, seed2)
logging.debug(("Seed Order %s - %s : strand -" % ret) + \
str(sameStrand))
return sameStrand, ret
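# Example (hypothetical seed names): orderSeeds(["ref0000001e3", "ref0000002e5"])
# returns (True, ("ref0000001e3", "ref0000002e5")) -- an e3 flank followed by an
# e5 flank is already ordered and is expected on the same strand.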
def createStats():
"""
I just wanted to separate the stats so It is a little cleaner
"""
#span, seed1, seed2
return {"support": [[], [], []], #keep all the flags I have \
"spanCount": 0,
"spanSeedName": None,
"spanSeedScore": 0,
"spanSeedStart": None,
"spanSeedEnd": None,
"spanSeedStrand1": None,
"spanSeedStrand2": None,
"avgSpanBases": 0,
"seed1": None,
"seed2": None,
"predictedGapSize": None,
"sameStrand": None,
"extendF1Count": 0,
"avgExtF1Bases": 0,
"extendF1SeedName": 0,
"extendF1SeedScore": 0,
"extendF1SeedStart": None,
"extendF1SeedEnd": None,
"extendF1SeedStrand": None,
"extendF2Count": 0,
"avgExtF2Bases": 0,
"extendF2SeedName": 0,
"extendF2SeedScore": 0,
"extendF2SeedStart": None,
"extendF2SeedEnd": None,
"extendF2SeedStrand": None,
"extendSeq1": None,
"extendSeq2": None,
"fillSeq": None,
"contribSeqs": 0,
"contribBases": 0,
"fillBases": 0,
"seed1Trim": 0,
"seed2Trim": 0}
def getSubSeqs(alignmentFile, readsFile, sameStrand, seeds, predictedGapSize, maxTrim, maxWiggle, basedir="./"):
"""
    Finds the seqs that align to the flanks the best and writes the best
    spanning/extending seed read for each flank to a fasta file.
    Might have a problem with the best read not going off the edge fully,
    so the maxFlank was put at 20.
    I should do more strand correction here
"""
global ALLTEMPFILES
def singleExtendLookup(sup, a):
"""
For getting how a single read extends a single flank
"""
if sup == SUPPORTFLAGS.none:
return None
#set the coordinates of the extending sequence
logging.debug(sup)
logging.debug(a.qname)
mystart = None
myend = None
if a.tname.endswith("e5") and sup in [SUPPORTFLAGS.left, SUPPORTFLAGS.span]:
if a.tstrand == '0':
mystart = 0
myend = a.qstart
else:
mystart = a.qend
myend = a.qseqlength
elif a.tname.endswith("e3") and sup in [SUPPORTFLAGS.right, SUPPORTFLAGS.span]:
if a.tstrand == '0':
mystart = a.qend
myend = a.qseqlength
else:
mystart = 0
myend = a.qstart
if mystart is None or myend is None or mystart < 0 or myend > a.qseqlength:
return None
#tscore = a.score * (myend - mystart)
#what flank and is it the best
if a.tname.replace('/','.') == stats["seed1"]:
stats["extendF1Count"] += 1
stats["avgExtF1Bases"] += a.qstart
stats["support"][1].append( sup )
if a.score < stats["extendF1SeedScore"]:
stats["extendF1SeedScore"] = a.score #tscore
stats["extendF1SeedName"] = a.qname
stats["extendF1SeedStart"] = mystart
stats["extendF1SeedEnd"] = myend
stats["extendF1SeedStrand"] = a.tstrand
return reads[a.qname].subSeq(mystart, myend)
#myOut = f1fout
elif a.tname.replace('/','.') == stats["seed2"]:
stats["extendF2Count"] += 1
stats["avgExtF2Bases"] += a.qstart
stats["support"][2].append( sup )
if a.score < stats["extendF2SeedScore"]:
stats["extendF2SeedScore"] = a.score#tscore
stats["extendF2SeedName"] = a.qname
stats["extendF2SeedStart"] = mystart
stats["extendF2SeedEnd"] = myend
stats["extendF2SeedStrand"] = a.tstrand
return reads[a.qname].subSeq(mystart, myend)
#myOut = f2fout
#myOut.write(str(reads[a.qname].subSeq(mystart, myend)))
return None
connector = AlignmentConnector()
#aligns = connector.parseAlignments(M5File(alignmentFile))
#no need to connect with the tailmap
aligns = defaultdict(list)
for a in M4File(alignmentFile):
aligns[a.qname].append(a)
aligns = aligns.values()
reads = FastqFile(readsFile)
stats = createStats()
stats["seed1"], stats["seed2"] = seeds
stats["sameStrand"] = sameStrand
bestSpan = None
bestF1E = None
bestF2E = None
for readGroup in aligns:
if len(readGroup) > 2:
best = 0
worst = 0
keep = []
for i in readGroup:
if i.score < best:
keep.insert(0, i)
if len(keep) >= 2:
keep.pop()
best = i.score
elif i.score < worst:
keep.insert(1,i)
if len(keep) >=2:
keep.pop()
worst = i.score
readGroup = keep
if len(readGroup) == 2:
#make sure that the two hits aren't hitting the same target
if readGroup[0].tname == readGroup[1].tname:
if readGroup[0].score <= readGroup[1].score:
del(readGroup[1])
else:
del(readGroup[0])
#hit on each flank
if len(readGroup) == 2:
r1, r2 = readGroup
if r1.tname == stats["seed2"]:
r1, r2 = r2, r1
a = connector.extendsTarget(r1, maxFlank=maxTrim, minCovers=0)
logging.debug(a)
#Also check appropriate orientation
if r1.tname.endswith('e3'):
if a not in [SUPPORTFLAGS.right, SUPPORTFLAGS.span]:
logging.debug('reset a')
a = SUPPORTFLAGS.none
elif r1.tname.endswith('e5'):
if a not in [SUPPORTFLAGS.left, SUPPORTFLAGS.span]:
logging.debug('reset a')
a = SUPPORTFLAGS.none
b = connector.extendsTarget(r2, maxFlank=maxTrim, minCovers=0)
if r2.tname.endswith('e3'):
if b not in [SUPPORTFLAGS.right, SUPPORTFLAGS.span]:
logging.debug('reset b')
b = SUPPORTFLAGS.none
elif r2.tname.endswith('e5'):
if b not in [SUPPORTFLAGS.left, SUPPORTFLAGS.span]:
logging.debug('reset b')
b = SUPPORTFLAGS.none
elif len(readGroup) == 1:
r1 = readGroup[0]
r2 = None
a = connector.extendsTarget(r1, maxFlank=10)
b = SUPPORTFLAGS.none
if r1.tname == stats["seed2"]:
r1, r2 = r2, r1
a, b = b, a
else:
logging.warning("read %s gave too many alignments" % (readGroup[0].qname))
#it extends both flanks
if a != SUPPORTFLAGS.none and b != SUPPORTFLAGS.none:
logging.debug("%s spans" % r1.qname)
logging.debug("aflag %d bflag %d" % (a,b))
logging.debug("hit1- %s (%d, %d)" % (r1.tname, r1.qstart, r1.qend))
logging.debug("hit2- %s (%d, %d)" % (r2.tname, r2.qstart, r2.qend))
rStart = min(r1.qend, r2.qend)
rEnd = max(r1.qstart, r2.qstart)
sz = rEnd - rStart
tooShort = False
if sz < 50:
logging.info("fill seq is too short to call consensus")
tooShort = True
tooShortSeq = reads[r1.qname].subSeq(rStart, rEnd)
#continue
if predictedGapSize is not None and (predictedGapSize - sz) > maxWiggle:
logging.info("fill seq size %d is smaller than allowed predicted gap size wiggle %d" % (sz, maxWiggle))
continue
#Need to ensure that it's extending in the correct orientation
#need to ensure that everything is on the right strand
if sameStrand and r1.tstrand != r2.tstrand:
logging.debug("bad strandedness")
continue
#check for negative gaps
stats["spanCount"] += 1
stats["avgSpanBases"] += rEnd - rStart
stats["support"][0].append(SUPPORTFLAGS.span)
t = reads[r1.qname].subSeq(rStart, rEnd)
#sfout.write(str(t))
#is it the best spanner
score = r1.score + r2.score
if score < stats["spanSeedScore"]:
logging.debug("scoring %s %s" % (r1.qname, r2.qname))
stats["spanSeedScore"] = score
spanSeedName = r1.qname
stats["spanSeedStrand1"] = r1.tstrand
bestSpan = reads[r1.qname].subSeq(rStart, rEnd)
stats["spanSeedName"] = r1.qname
stats["spanSeedStart"] = rStart
stats["spanSeedEnd"] = rEnd
stats["spanSeedStrand2"] = r2.tstrand
stats["spanShort"] = tooShort
if r1.tname.endswith('e5'):
stats["seed1Trim"] = r1.tstart
logging.debug('trim1 %d' % (r1.tstart))
else:
stats["seed1Trim"] = r1.tseqlength - r1.tend
logging.debug('trim1else %d' % (r1.tseqlength - r1.tend))
if r2.tname.endswith('e5'):
stats["seed2Trim"] = r2.tstart
logging.debug('trim2 %d' % (r2.tstart))
else:
stats["seed2Trim"] = r2.tseqlength - r2.tend
logging.debug('trimelse %d' % (r2.tseqlength - r2.tend))
c = singleExtendLookup(a, r1)
if c is not None:
bestF1E = c
c = singleExtendLookup(b, r2)
if c is not None:
bestF2E = c
#sfout.close()
#sfout = sfout.name
#f1fout.close()
#f1fout = f1fout.name
#f2fout.close()
#f2fout = f2fout.name
logging.info("%d reads span" % stats["spanCount"])
logging.info("%d reads extend flank 1" % stats["extendF1Count"])
logging.info("%d reads extend flank 2" % stats["extendF2Count"])
#nt = namedtuple("SubInfo", "stats spanReads flank1Reads flank2Reads spanSeed flank1Seed flank2Seed")
nt = namedtuple("SubInfo", "stats spanSeed flank1Seed flank2Seed")
#seeds out files
ssfout = None
f1sfout = None
f2sfout = None
#replace too short with N's
#if stats["spanCount"] == 0 and len(tooShort) > (stats["extendF1Count"] + stats["extendF2Count"])/2:
"""This is when I would say "oh, i'm too short - and stop early. Now, I'm still going to try to write the
short stuff and treat it like anything else. It'll be up to later processes to catch this guy.
if stats["spanCount"] != 0 and stats["spanShort"]:
#stats["avgSpanBases"] =
#stats["spanCount"] = len(tooShort)
logging.info("estimated fill len %d" % (stats["avgSpanBases"]))
logging.debug("but I'm too short")
#stats["fillSeq"] = "N"* abs(stats["spanSeedStart"] - stats["spanSeedEnd"])
stats["fillSeq"] = tooShortSeq
stats["spanSeedScore"] = -500
stats["spanSeedStrand1"] = '0'
stats["spanSeedStrand2"] = '0'
#stats["spanSeedName"] = "tooShortNs"
#ret = nt(stats, None, None, None, None, None, None)
ret = nt(stats, None, None, None)
return ret
"""
if stats["spanCount"] > 0:
stats["avgSpanBases"] = stats["avgSpanBases"]/stats["spanCount"]
logging.info("estimated fill len %d" % (stats["avgSpanBases"]))
#write seed
if len(bestSpan.seq) < 50:
logging.warning("fill sequence is small (%dbp) can't call consensus" % (len(bestSpan.seq)))
#I don't know what to return here
ssfout = NamedTemporaryFile(prefix="span_", suffix=".fasta", delete=False, dir=basedir)
ALLTEMPFILES.append(ssfout.name)
logging.debug("spanning with %s" % (bestSpan.name))
ssfout.write(">%s\n%s\n" % (bestSpan.name, bestSpan.seq))
ssfout.close()
ssfout = ssfout.name
if stats["extendF1Count"] > 0 and bestF1E is not None:
stats["avgExtF1Bases"] = stats["avgExtF1Bases"]/stats["extendF1Count"]
logging.info("estimated flank 1 extend len %d" % (stats["avgExtF1Bases"]))
#write seed
if len(bestF1E.seq) < 50:
logging.warning("f1e sequence is small (%dbp) can't call consensus" % (len(bestF1E.seq)))
#I don't know what to return here
f1sfout = NamedTemporaryFile(prefix="flank1_", suffix=".fasta", delete=False, dir=basedir)
ALLTEMPFILES.append(f1sfout.name)
f1sfout.write(">%s\n%s\n" % (bestF1E.name, bestF1E.seq))
f1sfout.close()
f1sfout = f1sfout.name
if stats["extendF2Count"] > 0 and bestF2E is not None:
stats["avgExtF2Bases"] = stats["avgExtF2Bases"]/stats["extendF2Count"]
logging.info("estimated flank 2 extend len %d" % (stats["avgExtF2Bases"]))
#write seed
if len(bestF2E.seq) < 50:
logging.warning("f2e sequence is small (%dbp) can't call consensus" % (len(bestF2E.seq)))
#I don't know what to return here
f2sfout = NamedTemporaryFile(prefix="flank2", suffix=".fasta", delete=False, dir=basedir)
ALLTEMPFILES.append(f2sfout.name)
f2sfout.write(">%s\n%s\n" % (bestF2E.name, bestF2E.seq))
f2sfout.close()
f2sfout = f2sfout.name
#all of the info I need to return... refactor later and create useful objects
#ret = nt(stats, sfout, f1fout, f2fout, ssfout, f1sfout, f2sfout)
# returns a NamedTuple with fields stats spanSeed flank1Seed flank2Seed
ret = nt(stats, ssfout, f1sfout, f2sfout)
#seeds writing
return ret
def buildFillSeq(data, inputReads, args):
"""
Using all of the information in the namedtuple returned from getSubSeqs,
go through the process of building the filling sequence.
    load the filling sequence into the data
"""
#try to build span
if SUPPORTFLAGS.span in data.stats["support"][0]:
logging.debug("build span")
alignFile = NamedTemporaryFile(prefix="scon_", suffix=".m5", delete=False, dir=args.tempDir)
alignFile.close(); alignFile = alignFile.name
ALLTEMPFILES.append(alignFile)
#blasr(data.spanReads, data.spanSeed, bestn = 1, nproc = args.nproc, outname=alignFile)
blasr(inputReads, data.spanSeed, bestn = 1, nproc = args.nproc, outname=alignFile)
aligns = M5File(alignFile)
if len(aligns) > 0:
con = consensus(aligns)
#if successful we're done
if con.contribBases > 0 and con.fillBases > 0:#must be
sequence = con.sequence#strandCorrector(data.stats["spanSeedStrand1"], con.sequence)
data.stats["fillSeq"] = sequence
data.stats["contribSeqs"] = con.contribSeqs
data.stats["contribBases"] = con.contribBases
data.stats["fillBases"] = con.fillBases
return
# this was originally just an 'else', but I was getting NoneType
# errors, so this should keep that from happening -ESR
elif data.spanSeed is not None:
logging.info("no mapping... picking span seq")
sequence = FastaFile(data.spanSeed).values()[0]
data.stats["fillSeq"] = sequence
data.stats["contribSeqs"] = 1
data.stats["contribBases"] = len(sequence)
data.stats["fillBases"] = len(sequence)
return
#no span -- we need to do flanks
flank1Success = False
flank2Success = False
logging.debug(json.dumps(data.stats, indent=4))
fl1Flag = SUPPORTFLAGS.left if data.stats["seed1"].endswith("e5") else SUPPORTFLAGS.right
if data.stats["seed2"] is not None:
fl2Flag = SUPPORTFLAGS.left if data.stats["seed2"].endswith("e5") else SUPPORTFLAGS.right
else:
fl2Flag = None
logging.debug((fl1Flag, fl2Flag))
if fl1Flag in data.stats["support"][1]:
logging.debug("build flank1 %d" % fl1Flag)
alignFile = NamedTemporaryFile(prefix="f1con_", suffix=".m5", delete=False, dir=args.tempDir)
alignFile.close(); alignFile = alignFile.name
ALLTEMPFILES.append(alignFile)
#blasr(data.flank1Reads, data.flank1Seed, bestn=1, nproc=args.nproc, outname=alignFile)
blasr(inputReads, data.flank1Seed, bestn=1, nproc=args.nproc, outname=alignFile)
aligns = M5File(alignFile)
if len(aligns) > 0:
con = consensus(aligns)
if con.contribBases > 0 and con.fillBases > 0:#must be
sequence = con.sequence#strandCorrector(data.stats["extendF1SeedStrand"], con.sequence)
data.stats["extendSeq1"] = sequence
data.stats["contribSeqs"] += con.contribSeqs
data.stats["contribBases"] += con.contribBases
data.stats["fillBases"] += con.fillBases
flank1Success = True
elif data.flank1Seed is not None:
logging.info("no mapping... picking f1 seq")
sequence = FastaFile(data.flank1Seed).values()[0]
data.stats["extendSeq1"] = sequence
data.stats["contribSeqs"] = 1
data.stats["contribBases"] = len(sequence)
data.stats["fillBases"] = len(sequence)
flank1Success = True
if fl2Flag in data.stats["support"][2]:
logging.debug("build flank2 %d" % fl2Flag)
alignFile = NamedTemporaryFile(prefix="f2con_", suffix=".m5", delete=False, dir=args.tempDir)
alignFile.close(); alignFile = alignFile.name
ALLTEMPFILES.append(alignFile)
#blasr(data.flank2Reads, data.flank2Seed, bestn=1, nproc=args.nproc, outname=alignFile)
blasr(inputReads, data.flank2Seed, bestn=1, nproc=args.nproc, outname=alignFile)
aligns = M5File(alignFile)
if len(aligns) > 0:
con = consensus(aligns)
if con.contribBases > 0 and con.fillBases > 0:#must be
sequence = con.sequence#strandCorrector(data.stats["extendF2SeedStrand"], con.sequence)
data.stats["extendSeq2"] = sequence
data.stats["contribSeqs"] += con.contribSeqs
data.stats["contribBases"] += con.contribBases
data.stats["fillBases"] += con.fillBases
flank2Success = True
elif data.flank2Seed is not None:
logging.info("no mapping... picking f1 seq")
sequence = FastaFile(data.flank2Seed).values()[0]
data.stats["extendSeq2"] = sequence
data.stats["contribSeqs"] = 1
data.stats["contribBases"] = len(sequence)
data.stats["fillBases"] = len(sequence)
flank2Success = True
if flank1Success and flank2Success:
logging.debug("mid unite")
seq = singleOverlapAssembly(data, args)
if seq is not None:
data.stats["fillSeq"] = seq
return
def strandCorrector(strand, sequence):
"""
ensures that the sequence inside of data is from the same strand as the
first seed
if -, flip it
"""
logging.debug("Weird %s" % (strand))
if strand == '1':
sequence = sequence.translate(revComp)[::-1]
return sequence
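# Illustrative example (added note, assuming revComp is the usual DNA-complement
# translation table): strandCorrector('1', "AACG") returns "CGTT", the reverse
# complement, while strand '0' sequences are returned unchanged.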
def singleOverlapAssembly(alldata, args):
"""
"""
global ALLTEMPFILES
data = alldata.stats
reads = NamedTemporaryFile(prefix="sol_", suffix=".fasta", delete=False, dir=args.tempDir)
ALLTEMPFILES.append(reads.name)
e1Seq = data["extendSeq1"]; e2Seq = data["extendSeq2"]
reads.write(">%s\n%s\n>%s\n%s\n" % ("seq1", e1Seq, "seq2", e2Seq))
reads.close()
alignFn = NamedTemporaryFile(prefix="sol_",suffix=".m5", delete=False, dir=args.tempDir)
ALLTEMPFILES.append(alignFn.name)
blasr(reads.name, reads.name, nproc=args.nproc, outname=alignFn.name)
aligns = M5File(alignFn)
# find best hit between the two
connector = AlignmentConnector()
bestS = None
bestA = 0
for i in aligns:
if i.qname != i.tname:
if connector.extendsTarget(i):
if i.score < bestS:
bestA = i
bestS = i.score
if bestS is None:
logging.info("no overlap between extenders")
return
#any of these steps could fail --
#Ensure the hit is valid
#(if + + and sameStrand we are okay, if - + and not sameStrand we are okay)
if data["sameStrand"] == (bestA.tstrand == '0'):
logging.info("bad overlap between extenders")
return
con = consensus([bestA])
bestA = bestA[0]
#strand correction...
if bestA.qname == "seq1":
if bestA.tstrand == '1':
e2Seq = e2Seq[:bestA.tstart].translate(revComp)[::-1]
seq = e1Seq[:bestA.qstart] + con.sequence.translate(revComp)[::-1] + e2Seq
else:
seq = e1Seq[:bestA.qstart] + con.sequence + e2Seq[bestA.tend:]
else:
if bestA.tstrand == '1':
e2Seq = e2Seq[:bestA.qstart].translate(revComp)[::-1]
seq = e1Seq[:bestA.tstart] + con.sequence + e2Seq
else:
seq = e1Seq[:bestA.qstart] + con.sequence + e2Seq[bestA.tstart:]
return seq
def preunitereads(inputFastq, args):
"""
    Overlap the input reads against each other and append any united (merged) reads back onto the input fastq
"""
global ALLTEMPFILES
alignFile = NamedTemporaryFile(prefix="uni_", suffix=".m5", delete=False, dir=args.tempDir).name
ALLTEMPFILES.append(alignFile)
readFile = NamedTemporaryFile(prefix="uni_", suffix=".fasta", delete=False, dir=args.tempDir)
ALLTEMPFILES.append(readFile.name)
input = FastqFile(inputFastq)
for read in input:
readFile.write(">%s\n%s\n" % (input[read].name, input[read].seq))
readFile.close()
readFile = readFile.name
blasr(readFile, readFile, bestn=5, nCandidates=20, nproc=args.nproc, outname=alignFile)
aligns = M5File(alignFile)
con = AlignmentConnector()
extenders = []
for a in aligns:
if a.tname == a.qname:
continue
if a.qstart - a.qend < 500 or a.tstart - a.tend < 500:
continue
sup = con.extendsTarget(a, minCovers=500)
#sup = con.extendsTarget(a, minCovers=100)
a.support = sup
if sup in [SUPPORTFLAGS.left, SUPPORTFLAGS.right]:
extenders.append(a)
best = {}#best of queries
for i in extenders:
score = 0
if i.qname in best:
score = best[i.qname].score
if i.score < score:
best[i.qname] = i
#print "q"
#for i in best.values():
#print str(i)
best2 = {}#best of targets
for i in best.values():
score = 0
if i.tname in best2:
score = best2[i.tname].score
if i.score < score:
best2[i.tname] = i
#print "t"
#for i in best2.values():
#print str(i)
best3 = {}#best of both
for i in best2.values():
keys = [i.qname, i.tname]
keys.sort()
keys = "".join(keys)
score = 0
if keys in best3:
score = best3[keys].score
if i.score < score:
best3[keys] = i
#print 'b'
#for i in best3.values():
#print str(i)
reads = FastqFile(inputFastq)
fout = open(inputFastq, 'a')
count = 0
for i in best3.values():
qseq = None
if i.support == SUPPORTFLAGS.left:
if i.qstrand == '0':
qseq = reads[i.qname].seq + reads[i.tname].seq[i.tend:]
elif i.qstrand == '1':
qseq = reads[i.qname].seq + reads[i.tname].seq[i.tend:].translate(revComp)
if i.support == SUPPORTFLAGS.right:
if i.qstrand == '0':
qseq = reads[i.tname].seq[:i.tstart] + reads[i.qname].seq
elif i.qstrand == '1':
qseq = reads[i.tname].seq[:i.tstart].translate(revComp) + reads[i.qname].seq
if qseq is not None:
count += 1
fout.write("@%s_%s\n%s\n+\n%s\n" % (i.qname, i.tname, qseq, "!"*len(qseq)))
logging.info("Preunited %d reads" % (count))
fout.close()
def parseArgs():
"""
input dir
predicted gapsize
if argument says that we need to extract the seeds we will have a single paramters
extractFlanks
"""
parser = argparse.ArgumentParser(description=USAGE, \
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("asmdir", metavar="DIR", type=str, \
help="Local assembly directory for a gap")
parser.add_argument("-t", "--maxTrim", type=int, default=100, \
help="Maxmum trim allowed (100)")
parser.add_argument("-w", "--maxWiggle", type=int, default=400, \
help="Maxmum wiggle for gap spanning allowed (400)")
parser.add_argument("-p", "--predictedGapSize", type=int, default=None)
parser.add_argument("-n", "--nproc", type=int, default=1)
parser.add_argument("-k", "--keepTemp", action="store_true",\
help="Keep temporary files")
parser.add_argument("--tempDir", type=str, default=None,
help="Where to write temporary files (DIR)")
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
if args.asmdir.endswith("/"):
args.asmdir = args.asmdir[:-1]
if args.tempDir is None:
args.tempDir = args.asmdir
setupLogging(args.debug)
return args
def run():
global ALLTEMPFILES
args = parseArgs()
dirName = os.path.basename(args.asmdir)
sameStrand, seeds = orderSeeds(dirName.split('_'))
inputReads = FastqFile(os.path.join(args.asmdir,"input.fastq"))
supportFn, flankFn = extractFlanks(inputReads, basedir=args.tempDir)
preunitereads(supportFn, args)
onFlank = NamedTemporaryFile(prefix="onFlank_", suffix=".m5", delete=False, dir=args.tempDir)
ALLTEMPFILES.append(onFlank.name)
onFlank.close()
tailblasr(supportFn, flankFn, nproc=args.nproc, \
outname=onFlank.name, basedir=args.tempDir)
data = getSubSeqs(onFlank.name, supportFn, sameStrand, seeds, \
args.predictedGapSize, args.maxTrim, args.maxWiggle, basedir=args.tempDir)
if data.stats["spanSeedName"] != "tooShortNs":
buildFillSeq(data, supportFn, args)
#if data.stats["support"][0] == SUPPORTFLAGS.span:
#logging.info("spanned gap")
#else:
#logging.info("seed1 extend %d - seed2 extend %d" % tuple(data.stats["support"][1:]))
data.stats["predictedGapSize"] = args.predictedGapSize
jOut = open(os.path.join(args.asmdir, "fillingMetrics.json"),'w')
jOut.write(json.dumps(data.stats,indent=4))
jOut.close()
if not args.keepTemp:
logging.info("Cleaning %d temp files" % (len(ALLTEMPFILES)))
for i in ALLTEMPFILES:
os.remove(i)
logging.info("Finished")
if __name__ == '__main__':
run()
|
py | b40fc6f336c285523bd099034cde8176a935eb78 | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
rospy.init_node("update_frame_id")
# Republish the RGB topics with frame_id set to the depth camera's frame, to fix
# the "depth_front frame id does not match rgb_front frame id" error.
class update_frame_id:
def __init__(self):
self.image = Image()
#subscribe to your specific sensors
self.sub_raw = rospy.Subscriber("/carla/ego_vehicle/rgb_front/image", Image, self.callback_raw)
self.sub_info = rospy.Subscriber("/carla/ego_vehicle/rgb_front/camera_info", CameraInfo, self.callback_info)
self.pub_raw = rospy.Publisher("/rgb/image_rect_color", Image, queue_size = 1)
self.pub_info = rospy.Publisher("/rgb/camera_info", CameraInfo, queue_size = 1)
def callback_raw(self, message):
message.header.frame_id = "ego_vehicle/depth_front"
self.pub_raw.publish(message)
def callback_info(self, message):
message.header.frame_id = "ego_vehicle/depth_front"
self.pub_info.publish(message)
update_frame_id = update_frame_id()
rospy.spin()
print("\nNode shutdown\n")
|
py | b40fc81931e20605e69fd8c6abb4d0c54f08bc16 | import numpy as np
import pytest
from pandas import (
DataFrame,
DatetimeIndex,
PeriodIndex,
Series,
date_range,
period_range,
)
import pandas._testing as tm
class TestToPeriod:
def test_to_period(self):
rng = date_range("1/1/2000", "1/1/2001", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range("1/1/2000", "1/1/2001")
tm.assert_series_equal(pts, exp)
pts = ts.to_period("M")
exp.index = exp.index.asfreq("M")
tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
tm.assert_series_equal(pts, exp)
# GH#7606 without freq
idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"])
exp_idx = PeriodIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D"
)
s = Series(np.random.randn(4), index=idx)
expected = s.copy()
expected.index = exp_idx
tm.assert_series_equal(s.to_period(), expected)
df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
expected = df.copy()
expected.index = exp_idx
tm.assert_frame_equal(df.to_period(), expected)
expected = df.copy()
expected.columns = exp_idx
tm.assert_frame_equal(df.to_period(axis=1), expected)
def test_to_period_raises(self, index):
# https://github.com/pandas-dev/pandas/issues/33327
ser = Series(index=index, dtype=object)
if not isinstance(index, DatetimeIndex):
msg = f"unsupported Type {type(index).__name__}"
with pytest.raises(TypeError, match=msg):
ser.to_period()
|
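A minimal stand-alone sketch of the Series.to_period behaviour the tests above exercise: a daily DatetimeIndex converts to a PeriodIndex, an explicit frequency re-buckets it, and to_timestamp converts back.
import numpy as np
from pandas import Series, date_range

rng = date_range("2000-01-01", periods=5, freq="D")
ts = Series(np.arange(5), index=rng)

pts = ts.to_period()         # DatetimeIndex (freq D) -> PeriodIndex with freq "D"
monthly = ts.to_period("M")  # explicit target frequency
back = pts.to_timestamp()    # PeriodIndex -> DatetimeIndex again

print(pts.index.freqstr, monthly.index.freqstr, back.index.equals(rng))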
py | b40fc8bc217bf902ba73281021ab3a3172da5068 | import re
import pytest
import redis
from unittest.mock import MagicMock, patch
from textwrap import dedent
from prompt_toolkit.formatted_text import FormattedText
from iredis.client import Client
from iredis.config import config, load_config_files
from iredis.completers import IRedisCompleter
from iredis.entry import Rainbow, prompt_message
from iredis.exceptions import NotSupport
from ..helpers import formatted_text_rematch
@pytest.fixture
def completer():
return IRedisCompleter()
@pytest.mark.parametrize(
"_input, command_name, expect_args",
[
("keys *", "keys", ["*"]),
("DEL abc foo bar", "DEL", ["abc", "foo", "bar"]),
("cluster info", "cluster info", []),
("CLUSTER failover FORCE", "CLUSTER failover", ["FORCE"]),
],
)
def test_send_command(_input, command_name, expect_args):
client = Client("127.0.0.1", "6379", None)
client.execute = MagicMock()
next(client.send_command(_input, None))
args, kwargs = client.execute.call_args
assert args == (command_name, *expect_args)
def test_client_not_support_hello_command(iredis_client):
with pytest.raises(NotSupport):
iredis_client.pre_hook("hello 3", "hello", "3", None)
def test_patch_completer():
client = Client("127.0.0.1", "6379", None)
completer = IRedisCompleter()
client.pre_hook(
"MGET foo bar hello world", "MGET", "foo bar hello world", completer
)
assert completer.key_completer.words == ["world", "hello", "bar", "foo"]
assert completer.key_completer.words == ["world", "hello", "bar", "foo"]
client.pre_hook("GET bar", "GET", "bar", completer)
assert completer.key_completer.words == ["bar", "world", "hello", "foo"]
def test_get_server_version_after_client(config):
Client("127.0.0.1", "6379", None)
assert re.match(r"\d+\..*", config.version)
config.version = "Unknown"
config.no_info = True
Client("127.0.0.1", "6379", None)
assert config.version == "Unknown"
def test_do_help(config):
client = Client("127.0.0.1", "6379", None)
config.version = "5.0.0"
resp = client.do_help("SET")
assert resp[10] == ("", "1.0.0 (Available on your redis-server: 5.0.0)")
config.version = "2.0.0"
resp = client.do_help("cluster", "addslots")
assert resp[10] == ("", "3.0.0 (Not available on your redis-server: 2.0.0)")
def test_rainbow_iterator():
"test color infinite iterator"
original_color = Rainbow.color
Rainbow.color = list(range(0, 3))
assert list(zip(range(10), Rainbow())) == [
(0, 0),
(1, 1),
(2, 2),
(3, 1),
(4, 0),
(5, 1),
(6, 2),
(7, 1),
(8, 0),
(9, 1),
]
Rainbow.color = original_color
def test_prompt_message(iredis_client, config):
config.rainbow = False
assert prompt_message(iredis_client) == "127.0.0.1:6379[15]> "
config.rainbow = True
assert prompt_message(iredis_client)[:3] == [
("#cc2244", "1"),
("#bb4444", "2"),
("#996644", "7"),
]
def test_on_connection_error_retry(iredis_client, config):
config.retry_times = 1
mock_connection = MagicMock()
mock_connection.read_response.side_effect = [
redis.exceptions.ConnectionError(
"Error 61 connecting to 127.0.0.1:7788. Connection refused."
),
"hello",
]
original_connection = iredis_client.connection
iredis_client.connection = mock_connection
value = iredis_client.execute("None", "GET", ["foo"])
assert value == "hello" # be rendered
mock_connection.disconnect.assert_called_once()
mock_connection.connect.assert_called_once()
iredis_client.connection = original_connection
def test_on_connection_error_retry_without_retrytimes(iredis_client, config):
config.retry_times = 0
mock_connection = MagicMock()
mock_connection.read_response.side_effect = [
redis.exceptions.ConnectionError(
"Error 61 connecting to 127.0.0.1:7788. Connection refused."
),
"hello",
]
iredis_client.connection = mock_connection
with pytest.raises(redis.exceptions.ConnectionError):
iredis_client.execute("None", "GET", ["foo"])
mock_connection.disconnect.assert_not_called()
mock_connection.connect.assert_not_called()
def test_socket_keepalive(config):
config.socket_keepalive = True
from iredis.client import Client
newclient = Client("127.0.0.1", "6379", 0)
assert newclient.connection.socket_keepalive
# keepalive off
config.socket_keepalive = False
newclient = Client("127.0.0.1", "6379", 0)
assert not newclient.connection.socket_keepalive
def test_not_retry_on_authentication_error(iredis_client, config):
config.retry_times = 2
mock_connection = MagicMock()
mock_connection.read_response.side_effect = [
redis.exceptions.AuthenticationError("Authentication required."),
"hello",
]
iredis_client.connection = mock_connection
with pytest.raises(redis.exceptions.ConnectionError):
iredis_client.execute("None", "GET", ["foo"])
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) < 6")
def test_auto_select_db_and_auth_for_reconnect_only_6(iredis_client, config):
config.retry_times = 2
config.raw = True
next(iredis_client.send_command("select 2"))
assert iredis_client.connection.db == 2
resp = next(iredis_client.send_command("auth 123"))
assert (
b"ERROR AUTH <password> called without any "
b"password configured for the default user. "
b"Are you sure your configuration is correct?" in resp
)
assert iredis_client.connection.password is None
next(iredis_client.send_command("config set requirepass 'abc'"))
next(iredis_client.send_command("auth abc"))
assert iredis_client.connection.password == "abc"
assert (
iredis_client.execute("ACL SETUSER", "default", "on", "nopass", "~*", "+@all")
== b"OK"
)
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) > 5")
def test_auto_select_db_and_auth_for_reconnect_only_5(iredis_client, config):
config.retry_times = 2
config.raw = True
next(iredis_client.send_command("select 2"))
assert iredis_client.connection.db == 2
resp = next(iredis_client.send_command("auth 123"))
assert b"Client sent AUTH, but no password is set" in resp
assert iredis_client.connection.password is None
next(iredis_client.send_command("config set requirepass 'abc'"))
next(iredis_client.send_command("auth abc"))
assert iredis_client.connection.password == "abc"
next(iredis_client.send_command("config set requirepass ''"))
def test_split_shell_command(iredis_client, completer):
assert iredis_client.split_command_and_pipeline(" get json | rg . ", completer) == (
" get json ",
"rg . ",
)
assert iredis_client.split_command_and_pipeline(
""" get "json | \\" hello" | rg . """, completer
) == (""" get "json | \\" hello" """, "rg . ")
def test_running_with_pipeline(clean_redis, iredis_client, capfd, completer):
config.shell = True
clean_redis.set("foo", "hello \n world")
with pytest.raises(StopIteration):
next(iredis_client.send_command("get foo | grep w", completer))
out, err = capfd.readouterr()
assert out == " world\n"
def test_running_with_multiple_pipeline(clean_redis, iredis_client, capfd, completer):
config.shell = True
clean_redis.set("foo", "hello world\nhello iredis")
with pytest.raises(StopIteration):
next(
iredis_client.send_command("get foo | grep hello | grep iredis", completer)
)
out, err = capfd.readouterr()
assert out == "hello iredis\n"
def test_can_not_connect_on_startup(capfd):
with pytest.raises(SystemExit):
Client("localhost", "16111", 15)
out, err = capfd.readouterr()
assert "connecting to localhost:16111." in err
def test_peek_key_not_exist(iredis_client, clean_redis, config):
config.raw = False
peek_result = list(iredis_client.do_peek("non-exist-key"))
assert peek_result == ["non-exist-key doesn't exist."]
def test_peek_string(iredis_client, clean_redis):
clean_redis.set("foo", "bar")
peek_result = list(iredis_client.do_peek("foo"))
assert peek_result[0][0] == ("class:dockey", "key: ")
assert re.match(r"string \(embstr\) mem: \d+ bytes, ttl: -1", peek_result[0][1][1])
assert peek_result[0][2:] == [
("", "\n"),
("class:dockey", "strlen: "),
("", "3"),
("", "\n"),
("class:dockey", "value: "),
("", '"bar"'),
]
def test_peek_list_fetch_all(iredis_client, clean_redis):
clean_redis.lpush("mylist", *[f"hello-{index}" for index in range(5)])
peek_result = list(iredis_client.do_peek("mylist"))
formatted_text_rematch(
peek_result[0],
FormattedText(
[
("class:dockey", "key: "),
("", r"list \(quicklist\) mem: \d+ bytes, ttl: -1"),
("", "\n"),
("class:dockey", "llen: "),
("", "5"),
("", "\n"),
("class:dockey", "elements: "),
("", "\n"),
("", r"1\)"),
("", " "),
("class:string", '"hello-4"'),
("", "\n"),
("", r"2\)"),
("", " "),
("class:string", '"hello-3"'),
("", "\n"),
("", r"3\)"),
("", " "),
("class:string", '"hello-2"'),
("", "\n"),
("", r"4\)"),
("", " "),
("class:string", '"hello-1"'),
("", "\n"),
("", r"5\)"),
("", " "),
("class:string", '"hello-0"'),
]
),
)
def test_peek_list_fetch_part(iredis_client, clean_redis):
clean_redis.lpush("mylist", *[f"hello-{index}" for index in range(40)])
peek_result = list(iredis_client.do_peek("mylist"))
assert len(peek_result[0]) == 91
def test_peek_set_fetch_all(iredis_client, clean_redis):
clean_redis.sadd("myset", *[f"hello-{index}" for index in range(5)])
peek_result = list(iredis_client.do_peek("myset"))
assert len(peek_result[0]) == 27
def test_peek_set_fetch_part(iredis_client, clean_redis):
clean_redis.sadd("myset", *[f"hello-{index}" for index in range(40)])
peek_result = list(iredis_client.do_peek("myset"))
assert peek_result[0][0] == ("class:dockey", "key: ")
assert peek_result[0][1][1].startswith("set (hashtable) mem: 2")
def test_peek_zset_fetch_all(iredis_client, clean_redis):
clean_redis.zadd(
"myzset", dict(zip([f"hello-{index}" for index in range(3)], range(3)))
)
peek_result = list(iredis_client.do_peek("myzset"))
formatted_text_rematch(
peek_result[0][0:9],
FormattedText(
[
("class:dockey", "key: "),
("", r"zset \(ziplist\) mem: \d+ bytes, ttl: -1"),
("", "\n"),
("class:dockey", "zcount: "),
("", "3"),
("", "\n"),
("class:dockey", "members: "),
("", "\n"),
("", r"1\)"),
]
),
)
def test_peek_zset_fetch_part(iredis_client, clean_redis):
clean_redis.zadd(
"myzset", dict(zip([f"hello-{index}" for index in range(40)], range(40)))
)
peek_result = list(iredis_client.do_peek("myzset"))
formatted_text_rematch(
peek_result[0][0:8],
FormattedText(
[
("class:dockey", "key: "),
("", r"zset \(ziplist\) mem: \d+ bytes, ttl: -1"),
("", "\n"),
("class:dockey", "zcount: "),
("", "40"),
("", "\n"),
("class:dockey", r"members \(first 40\): "),
("", "\n"),
]
),
)
def test_peek_hash_fetch_all(iredis_client, clean_redis):
for key, value in zip(
[f"hello-{index}" for index in range(3)], [f"hi-{index}" for index in range(3)]
):
clean_redis.hset("myhash", key, value)
peek_result = list(iredis_client.do_peek("myhash"))
assert len(peek_result[0]) == 28
def test_peek_hash_fetch_part(iredis_client, clean_redis):
for key, value in zip(
[f"hello-{index}" for index in range(100)],
[f"hi-{index}" for index in range(100)],
):
clean_redis.hset("myhash", key, value)
peek_result = list(iredis_client.do_peek("myhash"))
assert len(peek_result[0]) == 707
def test_peek_stream(iredis_client, clean_redis):
clean_redis.xadd("mystream", {"foo": "bar", "hello": "world"})
peek_result = list(iredis_client.do_peek("mystream"))
assert peek_result[0][0] == ("class:dockey", "key: ")
assert re.match(
r"stream \((stream|unknown)\) mem: 6\d\d bytes, ttl: -1", peek_result[0][1][1]
)
assert peek_result[0][2:18] == FormattedText(
[
("", "\n"),
("class:dockey", "XINFO: "),
("", "\n"),
("", " 1)"),
("", " "),
("class:string", '"length"'),
("", "\n"),
("", " 2)"),
("", " "),
("class:string", '"1"'),
("", "\n"),
("", " 3)"),
("", " "),
("class:string", '"radix-tree-keys"'),
("", "\n"),
("", " 4)"),
]
)
def test_mem_not_called_before_redis_4(config, iredis_client, clean_redis):
config.version = "3.2.9"
def wrapper(func):
def execute(command_name, *args):
print(command_name)
if command_name.upper() == "MEMORY USAGE":
raise Exception("MEMORY USAGE not supported!")
return func(command_name, *args)
return execute
iredis_client.execute = wrapper(iredis_client.execute)
clean_redis.set("foo", "bar")
result = list(iredis_client.do_peek("foo"))
assert result[0][1] == ("", "string (embstr), ttl: -1")
def test_mem_not_called_when_cant_get_server_version(
config, iredis_client, clean_redis
):
config.version = None
def wrapper(func):
def execute(command_name, *args):
print(command_name)
if command_name.upper() == "MEMORY USAGE":
raise Exception("MEMORY USAGE not supported!")
return func(command_name, *args)
return execute
iredis_client.execute = wrapper(iredis_client.execute)
clean_redis.set("foo", "bar")
result = list(iredis_client.do_peek("foo"))
assert result[0][1] == ("", "string (embstr), ttl: -1")
def test_reissue_command_on_redis_cluster(iredis_client, clean_redis):
mock_response = iredis_client.connection = MagicMock()
mock_response.read_response.side_effect = redis.exceptions.ResponseError(
"MOVED 12182 127.0.0.1:7002"
)
iredis_client.reissue_with_redirect = MagicMock()
iredis_client.execute("set", "foo", "bar")
assert iredis_client.reissue_with_redirect.call_args == (
(
"MOVED 12182 127.0.0.1:7002",
"set",
"foo",
"bar",
),
)
def test_reissue_command_on_redis_cluster_with_password_in_dsn(
iredis_client, clean_redis
):
config_content = dedent(
"""
[main]
log_location = /tmp/iredis1.log
no_info=True
[alias_dsn]
cluster-7003=redis://foo:[email protected]:7003
"""
)
with open("/tmp/iredisrc", "w+") as etc_config:
etc_config.write(config_content)
config_obj = load_config_files("/tmp/iredisrc")
config.alias_dsn = config_obj["alias_dsn"]
mock_execute_by_connection = iredis_client.execute_by_connection = MagicMock()
with patch("redis.connection.Connection.connect"):
iredis_client.reissue_with_redirect(
"MOVED 12182 127.0.0.1:7003", "set", "foo", "bar"
)
call_args = mock_execute_by_connection.call_args[0]
print(call_args)
assert list(call_args[1:]) == ["set", "foo", "bar"]
assert call_args[0].password == "bar"
|
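test_rainbow_iterator above pins down the expected prompt colouring: an endless ping-pong walk over the colour list (0, 1, 2, 1, 0, 1, ...). A small self-contained sketch of that behaviour, independent of the iredis implementation:
import itertools

def ping_pong(colors):
    """Yield the colors forward then backward, forever: 0, 1, 2, 1, 0, 1, 2, ..."""
    if len(colors) == 1:
        yield from itertools.repeat(colors[0])
        return
    cycle = list(colors) + list(colors)[-2:0:-1]  # e.g. [0, 1, 2] -> [0, 1, 2, 1]
    yield from itertools.cycle(cycle)

assert [c for _, c in zip(range(10), ping_pong([0, 1, 2]))] == [
    0, 1, 2, 1, 0, 1, 2, 1, 0, 1,
]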
py | b40fc9319760b6bfa4a5a905fd521e791c096d9a | from ..core.Processor import Processor
class UpdateAttribute(Processor):
def __init__(self, schedule={'scheduling strategy': 'EVENT_DRIVEN'}):
super(UpdateAttribute, self).__init__(
'UpdateAttribute',
auto_terminate=['success'],
schedule=schedule)
|
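A brief usage sketch for the wrapper above. The 'scheduling strategy' key mirrors the default argument already shown; the TIMER_DRIVEN value and the 'scheduling period' key are assumptions about the underlying scheduler, not taken from this module.
# Event-driven, as in the default above:
update = UpdateAttribute()

# Assumed timer-driven variant; keys other than 'scheduling strategy' are guesses.
periodic_update = UpdateAttribute(
    schedule={'scheduling strategy': 'TIMER_DRIVEN', 'scheduling period': '1 sec'}
)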
py | b40fc9f6519691bf1b9a48cefad7f7fa90895d94 | # -*- coding: utf-8 -*-
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import numpy as np
import pandas as pd
import pytest
from genemap import map_ids, map_dataframe, fetch_mapping
HOST = 'http://aug2014.archive.ensembl.org'
@pytest.fixture
def ensembl_kws():
"""Default keyword arguments from the Ensembl mapper."""
return {'host': HOST, 'mapper': 'ensembl'}
# pylint: disable=R0201,W0621
class TestMapIds(object):
"""Tests for map_ids function."""
def test_same_organism(self, ensembl_kws):
"""Tests conversion beteen different id types within same organism."""
mapped = map_ids(
['ENSG00000141510', 'ENSG00000012048'],
from_type='ensembl',
to_type='symbol',
**ensembl_kws)
assert mapped == ['TP53', 'BRCA1']
def test_between_organisms(self, ensembl_kws):
"""Tests conversion between organisms using only ensembl ids."""
mapped = map_ids(
['ENSG00000141510', 'ENSG00000012048'],
from_type='ensembl',
to_type='ensembl',
from_organism='hsapiens',
to_organism='mmusculus',
**ensembl_kws)
assert mapped == ['ENSMUSG00000059552', 'ENSMUSG00000017146']
def test_between_organisms_symbol(self, ensembl_kws):
"""Tests conversion with different ids types between organisms."""
mapped = map_ids(
['TP53', 'BRCA1'],
from_type='symbol',
to_type='symbol',
from_organism='hsapiens',
to_organism='mmusculus',
**ensembl_kws)
assert mapped == ['Trp53', 'Brca1']
def test_invalid_id(self, ensembl_kws):
"""Tests querying with invalid (or unknown) ids."""
mapped = map_ids(
['ENSG00000141510', 'ENSG00000012048', 'INVALID'],
from_type='ensembl',
to_type='symbol',
**ensembl_kws)
assert mapped == ['TP53', 'BRCA1', None]
class TestFetchMapping(object):
"""Tests fetch_mapping function."""
def test_same_organism(self, ensembl_kws):
"""Tests mapping beteen different id types within same organism."""
mapping = fetch_mapping(
from_type='ensembl', to_type='symbol', **ensembl_kws)
assert len(mapping) > 0
assert list(mapping.columns) == ['hsapiens_ensembl', 'hsapiens_symbol']
def build_df(index):
"""Helper function to build a random data frame."""
return pd.DataFrame(
{
'S1': np.random.randn(len(index)),
'S2': np.random.randn(len(index))
},
index=index)
class TestMapFrame(object):
"""Tests for map_frame function."""
def test_same_organism(self, ensembl_kws):
"""Tests conversion beteen different id types within same organism."""
df = build_df(index=['ENSG00000141510', 'ENSG00000012048'])
mapped = map_dataframe(
df, from_type='ensembl', to_type='symbol', **ensembl_kws)
assert list(mapped.index) == ['TP53', 'BRCA1']
def test_between_organisms(self, ensembl_kws):
"""Tests conversion between organisms using only ensembl ids."""
df = build_df(index=['ENSG00000141510', 'ENSG00000012048'])
mapped = map_dataframe(
df,
from_type='ensembl',
to_type='ensembl',
from_organism='hsapiens',
to_organism='mmusculus',
**ensembl_kws)
assert list(mapped.index) == [
'ENSMUSG00000059552', 'ENSMUSG00000017146'
]
def test_between_organisms_symbol(self, ensembl_kws):
"""Tests conversion with different ids types between organisms."""
df = build_df(index=['TP53', 'BRCA1'])
mapped = map_dataframe(
df,
from_type='symbol',
to_type='symbol',
from_organism='hsapiens',
to_organism='mmusculus',
**ensembl_kws)
assert list(mapped.index) == ['Trp53', 'Brca1']
|
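A condensed usage sketch of the three genemap entry points exercised above, reusing the keyword arguments from the tests (the archive host is the HOST constant defined at the top of the module; a live Ensembl connection is assumed).
import pandas as pd
from genemap import map_ids, map_dataframe, fetch_mapping

HOST = 'http://aug2014.archive.ensembl.org'

# Ensembl ids -> gene symbols within one organism.
symbols = map_ids(['ENSG00000141510'], from_type='ensembl', to_type='symbol',
                  mapper='ensembl', host=HOST)

# Full two-column mapping table.
mapping = fetch_mapping(from_type='ensembl', to_type='symbol',
                        mapper='ensembl', host=HOST)

# Re-index an expression-like frame from human symbols to mouse symbols.
df = pd.DataFrame({'S1': [1.0], 'S2': [2.0]}, index=['TP53'])
mouse = map_dataframe(df, from_type='symbol', to_type='symbol',
                      from_organism='hsapiens', to_organism='mmusculus',
                      mapper='ensembl', host=HOST)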
py | b40fc9fa9b9136e724c82337da23fef2cb1f56d1 | from django.dispatch import Signal
content_object_state_change = Signal(providing_args=["content_object", "created"])
content_object_delete = Signal(providing_args=["content_object"])
|
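A short sketch of how receivers would hook into the two custom signals above, using the standard django.dispatch API; the myapp.signals import path and the handler names are illustrative.
from django.dispatch import receiver
from myapp.signals import content_object_state_change, content_object_delete  # assumed path

@receiver(content_object_state_change)
def on_state_change(sender, content_object, created, **kwargs):
    action = "created" if created else "updated"
    print("%s %s: %r" % (sender.__name__, action, content_object))

@receiver(content_object_delete)
def on_delete(sender, content_object, **kwargs):
    print("%s deleted: %r" % (sender.__name__, content_object))

# Senders fire the signals explicitly, e.g.:
# content_object_state_change.send(sender=SomeModel, content_object=obj, created=True)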
py | b40fca473c56978dd791e879d481759cd3736f0b | # -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import mock
from compat import unittest
from gitsome.github import GitHub
from tests.mock_feed_parser import MockFeedParser
from tests.mock_github_api import MockGitHubApi
from tests.mock_pretty_date_time import pretty_date_time
from tests.data.email import formatted_emails
from tests.data.emoji import formatted_emojis
from tests.data.events import formatted_events
from tests.data.user import formatted_org, formatted_user, formatted_users
from tests.data.gitignores import formatted_gitignores, formatted_gitignores_tip
from tests.data.issue import formatted_issues, formatted_pull_requests
from tests.data.license import formatted_licenses, formatted_licenses_tip
from tests.data.thread import formatted_threads
from tests.data.trends import formatted_trends
from tests.data.user_feed import formatted_user_feed
class GitHubTest(unittest.TestCase):
def setUp(self):
self.github = GitHub()
self.github.config.api = MockGitHubApi()
self.github.formatter.pretty_dt = pretty_date_time
self.github.trend_parser = MockFeedParser()
def test_avatar_no_pil(self):
avatar_text = self.github.avatar(
'https://avatars.githubusercontent.com/u/583231?v=3', False)
assert avatar_text == 'PIL not found.\n'
@mock.patch('gitsome.github.click.secho')
def test_create_comment(self, mock_click_secho):
self.github.create_comment('user1/repo1/1', 'text')
mock_click_secho.assert_called_with(
'Created comment: text',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_create_comment_invalid_args(self, mock_click_secho):
self.github.create_comment('invalid/repo1/1', 'text')
mock_click_secho.assert_called_with(
'Error creating comment',
fg=self.github.config.clr_error)
self.github.create_comment('user1/repo1/foo', 'text')
mock_click_secho.assert_called_with(
'Expected argument: user/repo/# and option -t "comment".',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_create_issue(self, mock_click_secho):
self.github.create_issue('user1/repo1', 'title', 'desc')
mock_click_secho.assert_called_with(
'Created issue: title\ndesc',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_create_issue_no_desc(self, mock_click_secho):
self.github.create_issue('user1/repo1', 'title', issue_desc=None)
mock_click_secho.assert_called_with(
'Created issue: title\n',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_create_issue_invalid_args(self, mock_click_secho):
self.github.create_issue('invalid/repo1', 'title', 'desc')
mock_click_secho.assert_called_with(
'Error creating issue.',
fg=self.github.config.clr_error)
self.github.create_issue('u', 'title', 'desc')
mock_click_secho.assert_called_with(
'Expected argument: user/repo and option -t "title".',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_create_repo(self, mock_click_secho):
self.github.create_repo('name', 'desc', True)
mock_click_secho.assert_called_with(
'Created repo: name\ndesc',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_create_repo_no_desc(self, mock_click_secho):
self.github.create_repo('name', repo_desc=None)
mock_click_secho.assert_called_with(
'Created repo: name\n',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_create_repo_invalid_args(self, mock_click_secho):
self.github.create_repo('repo1', 'desc', True)
mock_click_secho.assert_called_with(
'Error creating repo: foobar',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_emails(self, mock_click_secho):
self.github.emails()
mock_click_secho.assert_called_with(formatted_emails)
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.config.Config.prompt_news_feed')
def test_feed_config(self, mock_config_prompt_news_feed, mock_click_secho):
self.github.feed()
mock_config_prompt_news_feed.assert_called_with()
@mock.patch('gitsome.github.click.secho')
def test_feed(self, mock_click_secho):
self.github.config.user_feed = 'user_feed'
self.github.feed()
mock_click_secho.assert_called_with(formatted_user_feed)
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.config.Config')
def test_feed_user(self, mock_config, mock_click_secho):
self.github.feed('user1')
mock_click_secho.assert_called_with(formatted_events)
@mock.patch('gitsome.github.click.secho')
def test_emojis(self, mock_click_secho):
self.github.emojis()
mock_click_secho.assert_called_with(formatted_emojis)
@mock.patch('gitsome.github.click.secho')
def test_followers(self, mock_click_secho):
self.github.followers('foo')
mock_click_secho.assert_called_with(formatted_users)
@mock.patch('gitsome.github.click.secho')
def test_following(self, mock_click_secho):
self.github.following('foo')
mock_click_secho.assert_called_with(formatted_users)
@mock.patch('gitsome.github.click.secho')
def test_gitignore_template(self, mock_click_secho):
self.github.gitignore_template('valid_language')
mock_click_secho.assert_called_with(
'template',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_gitignore_template_invalid(self, mock_click_secho):
self.github.gitignore_template('invalid_language')
mock_click_secho.assert_called_with(
('Invalid case-sensitive template requested, run the '
'following command to see available templates:\n'
' gh gitignore-templates'),
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_gitignore_templates(self, mock_click_secho):
self.github.gitignore_templates()
mock_click_secho.assert_any_call(formatted_gitignores)
mock_click_secho.assert_any_call(formatted_gitignores_tip,
fg=self.github.config.clr_message)
@mock.patch('gitsome.web_viewer.WebViewer.view_url')
def test_issue(self, mock_view_url):
self.github.issue('user1/repo1/1')
mock_view_url.assert_called_with(
'https://github.com/user1/repo1/issues/1')
@mock.patch('gitsome.github.click.secho')
def test_issue_invalid_args(self, mock_click_secho):
self.github.issue('user1/repo1/foo')
mock_click_secho.assert_called_with(
'Expected argument: user/repo/#.',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_issues_setup(self, mock_click_secho):
self.github.issues_setup()
mock_click_secho.assert_called_with(formatted_issues)
@mock.patch('gitsome.github.click.secho')
def test_license(self, mock_click_secho):
self.github.license('valid_license')
mock_click_secho.assert_called_with(
'template',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_license_invalid(self, mock_click_secho):
self.github.license('invalid_license')
mock_click_secho.assert_called_with(
(' Invalid case-sensitive license requested, run the '
'following command to see available licenses:\n'
' gh licenses'),
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
def test_licenses(self, mock_click_secho):
self.github.licenses()
mock_click_secho.assert_any_call(formatted_licenses)
mock_click_secho.assert_any_call(formatted_licenses_tip,
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_notifications(self, mock_click_secho):
self.github.notifications()
mock_click_secho.assert_called_with(formatted_threads)
@mock.patch('gitsome.github.click.secho')
def test_octocat(self, mock_click_secho):
self.github.octocat('foo\\nbar')
mock_click_secho.assert_called_with(
'foo\nbar',
fg=self.github.config.clr_message)
@mock.patch('gitsome.github.click.secho')
def test_pull_requests(self, mock_click_secho):
self.github.pull_requests()
mock_click_secho.assert_called_with(formatted_pull_requests)
@mock.patch('gitsome.github.click.secho')
def test_rate_limit(self, mock_click_secho):
self.github.rate_limit()
mock_click_secho.assert_called_with(
'Rate limit: 5000',
fg=self.github.config.clr_message)
@mock.patch('gitsome.web_viewer.WebViewer.view_url')
def test_repository(self, mock_view_url):
self.github.repository('user1/repo1')
mock_view_url.assert_called_with(
'https://github.com/user1/repo1')
@mock.patch('gitsome.github.click.secho')
def test_repository_invalid(self, mock_click_secho):
self.github.repository('user1/repo1/1')
mock_click_secho.assert_called_with(
'Expected argument: user/repo.',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.GitHub.issues')
def test_search_issues(self, mock_github_issues, mock_click_secho):
self.github.search_issues('foo')
mock_github_issues.assert_called_with(
['foobar', 'foobar', 'foobar'], 1000, False, sort=False)
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.GitHub.repositories')
def test_search_repos(self, mock_github_repositories, mock_click_secho):
self.github.search_repositories('foo', 'stars')
mock_github_repositories.assert_called_with(
['foobar'], 1000, False, sort=False)
@mock.patch('gitsome.github.click.secho')
def test_trending(self, mock_click_secho):
self.github.trending('Python', False, False, False)
mock_click_secho.assert_called_with(formatted_trends)
@mock.patch('gitsome.github.click.secho')
def test_user(self, mock_click_secho):
self.github.user('user1')
mock_click_secho.assert_called_with(formatted_user)
self.github.user('user2')
mock_click_secho.assert_called_with(formatted_org)
@mock.patch('gitsome.github.click.secho')
def test_user_invalid(self, mock_click_secho):
self.github.user('invalid_user')
mock_click_secho.assert_called_with(
'Invalid user.',
fg=self.github.config.clr_error)
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.webbrowser.open')
def test_user_browser(self, mock_webbrowser_open, mock_click_secho):
self.github.user('invalid_user', browser=True)
mock_webbrowser_open.assert_called_with(
'https://github.com/invalid_user')
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.webbrowser.open')
def test_view_browser(self, mock_webbrowser_open, mock_click_secho):
self.github.config.load_urls = lambda x: ['user1/foo']
self.github.view(1, view_in_browser=True)
mock_webbrowser_open.assert_called_with(
'https://github.com/user1/foo')
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.GitHub.issue')
def test_view_issue(self, mock_github_issue, mock_click_secho):
self.github.config.load_urls = lambda x: ['user1/foo/issues/1']
self.github.view(0)
mock_github_issue.assert_called_with('user1/foo/1')
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.github.GitHub.repository')
def test_view_repo(self, mock_github_repository, mock_click_secho):
self.github.config.load_urls = lambda x: ['user1/foo']
self.github.view(0)
mock_github_repository.assert_called_with('user1/foo')
@mock.patch('gitsome.github.click.secho')
@mock.patch('gitsome.web_viewer.WebViewer.view_url')
def test_view_user(self, mock_view_url, mock_click_secho):
self.github.config.load_urls = lambda x: ['user1']
self.github.view(0)
mock_view_url.assert_called_with('https://github.com/user1')
def test_base_url(self):
self.github.config.enterprise_url = 'https://github.intra.example.com'
assert self.github.base_url == 'https://github.intra.example.com'
self.github.config.enterprise_url = None
assert self.github.base_url == self.github._base_url
def test_add_base_url(self):
expected = self.github.base_url + 'foo.html'
assert self.github.add_base_url('foo.html') == expected
assert self.github.add_base_url(expected) == expected
|
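The last two tests above pin down the enterprise-URL handling: base_url prefers config.enterprise_url and falls back to the public default, and add_base_url only prefixes relative paths. A compact illustrative re-implementation of just that behaviour (not the gitsome code itself):
class UrlConfig:
    _base_url = 'https://github.com/'

    def __init__(self, enterprise_url=None):
        self.enterprise_url = enterprise_url

    @property
    def base_url(self):
        # Prefer the enterprise URL when configured, otherwise the public default.
        return self.enterprise_url if self.enterprise_url else self._base_url

    def add_base_url(self, url):
        # Leave already-absolute URLs untouched, prefix relative ones.
        return url if url.startswith(self.base_url) else self.base_url + url

assert UrlConfig('https://github.intra.example.com').base_url == 'https://github.intra.example.com'
assert UrlConfig().add_base_url('foo.html') == 'https://github.com/foo.html'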
py | b40fca47befdd1ce5827f055900f4dfc13611b20 | import os.path as osp
import torchvision.transforms as transforms
from cvpods.configs.base_classification_config import BaseClassificationConfig
import loader
_config_dict = dict(
MODEL=dict(
WEIGHTS="",
AS_PRETRAIN=True,
RESNETS=dict(
DEPTH=50,
NUM_CLASSES=1000,
NORM="SyncBN",
OUT_FEATURES=["linear"],
STRIDE_IN_1X1=False, # default true for msra models
ZERO_INIT_RESIDUAL=True, # default false, use true for all subsequent models
),
CLR=dict(
DIM=128,
TAU=0.2,
MLP=True,
NORM="SyncBN",
),
),
DATASETS=dict(
TRAIN=("imagenet_train", ),
TEST=("imagenet_val", ),
),
DATALOADER=dict(NUM_WORKERS=8, ),
SOLVER=dict(
LR_SCHEDULER=dict(
NAME="WarmupCosineLR",
MAX_EPOCH=200,
WARMUP_ITERS=10,
),
OPTIMIZER=dict(
NAME="SGD",
LARS=dict(
ENABLED=False,
EPS=1e-8,
TRUST_COEF=1e-3,
),
BASE_LR=0.03,
MOMENTUM=0.9,
WEIGHT_DECAY=1e-4,
WEIGHT_DECAY_NORM=1e-4,
),
CHECKPOINT_PERIOD=10,
IMS_PER_BATCH=256,
IMS_PER_DEVICE=32,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("RepeatList", dict(transforms=[
("Torch_Compose", transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomGrayscale(p=0.2),
transforms.RandomHorizontalFlip(),
])),
], repeat_times=2)),
],
)
),
OUTPUT_DIR=osp.join(
'/data/Outputs/model_logs/cvpods_playground/SelfSup',
osp.split(osp.realpath(__file__))[0].split("SelfSup/")[-1]))
class MoCoV2Config(BaseClassificationConfig):
def __init__(self):
super(MoCoV2Config, self).__init__()
self._register_configuration(_config_dict)
config = MoCoV2Config()
|
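The RepeatList wrapper with repeat_times=2 in the TRAIN_PIPELINES above is what yields the two augmented views per image that the contrastive (MoCo v2 style) objective needs. A minimal stand-alone sketch of that idea using only torchvision transforms; the GaussianBlur step from the local loader module is omitted here.
import torchvision.transforms as transforms
from PIL import Image

view_pipeline = transforms.Compose([
    transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
    transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
    transforms.RandomGrayscale(p=0.2),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

def two_views(img: Image.Image):
    # Two independent samples of the same stochastic pipeline -> query/key pair.
    return view_pipeline(img), view_pipeline(img)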
py | b40fcb82ea7e2ac3ed86cedbc607f19f5244e41d | import attr
from falcon_auth.backends import AuthBackend
from ebl.bibliography.application.bibliography import Bibliography
from ebl.bibliography.application.bibliography_repository import BibliographyRepository
from ebl.changelog import Changelog
from ebl.corpus.infrastructure.mongo_text_repository import MongoTextRepository
from ebl.dictionary.application.word_repository import WordRepository
from ebl.files.application.file_repository import FileRepository
from ebl.fragmentarium.application.annotations_repository import AnnotationsRepository
from ebl.fragmentarium.application.fragment_repository import FragmentRepository
from ebl.fragmentarium.application.fragment_updater import FragmentUpdater
from ebl.fragmentarium.application.transliteration_update_factory import (
TransliterationUpdateFactory,
)
from ebl.lemmatization.application.suggestion_finder import LemmaRepository
from ebl.transliteration.application.sign_repository import SignRepository
from ebl.transliteration.application.transliteration_query_factory import (
TransliterationQueryFactory,
)
@attr.s(auto_attribs=True, frozen=True)
class Context:
auth_backend: AuthBackend
word_repository: WordRepository
sign_repository: SignRepository
public_file_repository: FileRepository
photo_repository: FileRepository
folio_repository: FileRepository
fragment_repository: FragmentRepository
changelog: Changelog
bibliography_repository: BibliographyRepository
text_repository: MongoTextRepository
annotations_repository: AnnotationsRepository
lemma_repository: LemmaRepository
def get_bibliography(self):
return Bibliography(self.bibliography_repository, self.changelog)
def get_fragment_updater(self):
return FragmentUpdater(
self.fragment_repository,
self.changelog,
self.get_bibliography(),
self.photo_repository,
)
def get_transliteration_update_factory(self):
return TransliterationUpdateFactory(self.sign_repository)
def get_transliteration_query_factory(self):
return TransliterationQueryFactory(self.sign_repository)
|
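Context above is a frozen attrs class, so variants are derived rather than mutated. A small sketch of that pattern with mocked collaborators; the import path is assumed, and the field names are taken from attr.fields rather than hand-listed.
import attr
from unittest.mock import Mock
from ebl.context import Context  # assumed module path

# Fill every declared field with a mock collaborator.
base_context = Context(**{field.name: Mock() for field in attr.fields(Context)})

# attr.evolve returns a new frozen instance with one collaborator swapped out.
fake_fragments = Mock(name="fragment_repository")
test_context = attr.evolve(base_context, fragment_repository=fake_fragments)
assert test_context.fragment_repository is fake_fragments
assert base_context.fragment_repository is not fake_fragments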
py | b40fcbc02e358707947b65518a92475cef2bf0cb | import os
import unittest
from rdflib import Graph
class RdflibParserTestCase(unittest.TestCase):
def test_issue_1(self):
""" Another test for the rdflib embedded quote problem
See line 1578 in notation3.py:
k = 'abfrtvn\\"\''.find(ch)
if k >= 0:
uch = '\a\b\f\r\t\v\n\\"\''[k]
"""
g = Graph()
data_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data'
))
g.load(os.path.join(data_file_path, '1literalPattern_with_all_punctuation.ttl'), format="turtle")
self.assertTrue(True, "")
if __name__ == '__main__':
unittest.main()
|
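The fixture file above is what drives the embedded-quote path in notation3.py; the same parse can be reproduced without a data file by feeding an inline Turtle literal with escaped quotes (minimal sketch, inline data assumed to be representative of the fixture):
from rdflib import Graph

turtle = r'''
@prefix ex: <http://example.org/> .
ex:s ex:p "a literal with \"escaped quotes\", a tab\t and a newline\n" .
'''

g = Graph()
g.parse(data=turtle, format="turtle")
for _, _, obj in g:
    print(repr(obj))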
py | b40fcc8e27f4ccafb7e2a22110beeddfcfaa7b50 | from .pull import pull
from .push import push
from .sorted_merge import sorted_merge
|
py | b40fcca1794cfd27463bfaf46ce33b1a040d7098 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkkms.endpoint import endpoint_data
class ListSecretsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Kms', '2016-01-20', 'ListSecrets','kms')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Filters(self):
return self.get_query_params().get('Filters')
def set_Filters(self,Filters):
self.add_query_param('Filters',Filters)
def get_FetchTags(self):
return self.get_query_params().get('FetchTags')
def set_FetchTags(self,FetchTags):
self.add_query_param('FetchTags',FetchTags)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber) |
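A typical call pattern for the request class above with the core SDK client. AcsClient and do_action_with_exception are the standard aliyunsdkcore entry points; the import path for ListSecretsRequest follows the usual SDK layout for API version 2016-01-20, and the credentials/region are placeholders.
import json
from aliyunsdkcore.client import AcsClient
from aliyunsdkkms.request.v20160120.ListSecretsRequest import ListSecretsRequest  # assumed path

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = ListSecretsRequest()
request.set_PageSize(10)
request.set_PageNumber(1)
request.set_FetchTags("true")

response = client.do_action_with_exception(request)
print(json.loads(response))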
py | b40fcf5fba8fd1521abb085df16b68eacb4e0bc8 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ttypes import *
from ...thrift.Thrift import TProcessor
from ...thrift.transport import TTransport
try:
from ...thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def defineStream(self, sessionId, streamDefinition):
"""
Parameters:
- sessionId
- streamDefinition
"""
pass
def findStreamId(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
pass
def publish(self, eventBundle):
"""
Parameters:
- eventBundle
"""
pass
def deleteStreamById(self, sessionId, streamId):
"""
Parameters:
- sessionId
- streamId
"""
pass
def deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def defineStream(self, sessionId, streamDefinition):
"""
Parameters:
- sessionId
- streamDefinition
"""
self.send_defineStream(sessionId, streamDefinition)
return self.recv_defineStream()
def send_defineStream(self, sessionId, streamDefinition):
self._oprot.writeMessageBegin('defineStream', TMessageType.CALL, self._seqid)
args = defineStream_args()
args.sessionId = sessionId
args.streamDefinition = streamDefinition
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_defineStream(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = defineStream_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ade is not None:
raise result.ade
if result.mtd is not None:
raise result.mtd
if result.tde is not None:
raise result.tde
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "defineStream failed: unknown result");
def findStreamId(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
self.send_findStreamId(sessionId, streamName, streamVersion)
return self.recv_findStreamId()
def send_findStreamId(self, sessionId, streamName, streamVersion):
self._oprot.writeMessageBegin('findStreamId', TMessageType.CALL, self._seqid)
args = findStreamId_args()
args.sessionId = sessionId
args.streamName = streamName
args.streamVersion = streamVersion
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_findStreamId(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = findStreamId_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.tnde is not None:
raise result.tnde
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "findStreamId failed: unknown result");
def publish(self, eventBundle):
"""
Parameters:
- eventBundle
"""
self.send_publish(eventBundle)
self.recv_publish()
def send_publish(self, eventBundle):
self._oprot.writeMessageBegin('publish', TMessageType.CALL, self._seqid)
args = publish_args()
args.eventBundle = eventBundle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_publish(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = publish_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ue is not None:
raise result.ue
if result.se is not None:
raise result.se
return
def deleteStreamById(self, sessionId, streamId):
"""
Parameters:
- sessionId
- streamId
"""
self.send_deleteStreamById(sessionId, streamId)
return self.recv_deleteStreamById()
def send_deleteStreamById(self, sessionId, streamId):
self._oprot.writeMessageBegin('deleteStreamById', TMessageType.CALL, self._seqid)
args = deleteStreamById_args()
args.sessionId = sessionId
args.streamId = streamId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteStreamById(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteStreamById_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStreamById failed: unknown result");
def deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
self.send_deleteStreamByNameVersion(sessionId, streamName, streamVersion)
return self.recv_deleteStreamByNameVersion()
def send_deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
self._oprot.writeMessageBegin('deleteStreamByNameVersion', TMessageType.CALL, self._seqid)
args = deleteStreamByNameVersion_args()
args.sessionId = sessionId
args.streamName = streamName
args.streamVersion = streamVersion
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteStreamByNameVersion(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteStreamByNameVersion_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStreamByNameVersion failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["defineStream"] = Processor.process_defineStream
self._processMap["findStreamId"] = Processor.process_findStreamId
self._processMap["publish"] = Processor.process_publish
self._processMap["deleteStreamById"] = Processor.process_deleteStreamById
self._processMap["deleteStreamByNameVersion"] = Processor.process_deleteStreamByNameVersion
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_defineStream(self, seqid, iprot, oprot):
args = defineStream_args()
args.read(iprot)
iprot.readMessageEnd()
result = defineStream_result()
try:
result.success = self._handler.defineStream(args.sessionId, args.streamDefinition)
except Exception.ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException, ade:
result.ade = ade
except Exception.ttypes.ThriftMalformedStreamDefinitionException, mtd:
result.mtd = mtd
except Exception.ttypes.ThriftStreamDefinitionException, tde:
result.tde = tde
except Exception.ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("defineStream", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_findStreamId(self, seqid, iprot, oprot):
args = findStreamId_args()
args.read(iprot)
iprot.readMessageEnd()
result = findStreamId_result()
try:
result.success = self._handler.findStreamId(args.sessionId, args.streamName, args.streamVersion)
except Exception.ttypes.ThriftNoStreamDefinitionExistException, tnde:
result.tnde = tnde
except Exception.ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("findStreamId", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_publish(self, seqid, iprot, oprot):
args = publish_args()
args.read(iprot)
iprot.readMessageEnd()
result = publish_result()
try:
self._handler.publish(args.eventBundle)
except Exception.ttypes.ThriftUndefinedEventTypeException, ue:
result.ue = ue
except Exception.ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("publish", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteStreamById(self, seqid, iprot, oprot):
args = deleteStreamById_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteStreamById_result()
try:
result.success = self._handler.deleteStreamById(args.sessionId, args.streamId)
except Exception.ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("deleteStreamById", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteStreamByNameVersion(self, seqid, iprot, oprot):
args = deleteStreamByNameVersion_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteStreamByNameVersion_result()
try:
result.success = self._handler.deleteStreamByNameVersion(args.sessionId, args.streamName, args.streamVersion)
except Exception.ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("deleteStreamByNameVersion", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class defineStream_args:
"""
Attributes:
- sessionId
- streamDefinition
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamDefinition', None, None, ), # 2
)
def __init__(self, sessionId=None, streamDefinition=None,):
self.sessionId = sessionId
self.streamDefinition = streamDefinition
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamDefinition = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('defineStream_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamDefinition is not None:
oprot.writeFieldBegin('streamDefinition', TType.STRING, 2)
oprot.writeString(self.streamDefinition)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class defineStream_result:
"""
Attributes:
- success
- ade
- mtd
- tde
- se
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ade', (Exception.ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException, Exception.ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'mtd', (Exception.ttypes.ThriftMalformedStreamDefinitionException, Exception.ttypes.ThriftMalformedStreamDefinitionException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'tde', (Exception.ttypes.ThriftStreamDefinitionException, Exception.ttypes.ThriftStreamDefinitionException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'se', (Exception.ttypes.ThriftSessionExpiredException, Exception.ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ade=None, mtd=None, tde=None, se=None,):
self.success = success
self.ade = ade
self.mtd = mtd
self.tde = tde
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ade = Exception.ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException()
self.ade.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.mtd = Exception.ttypes.ThriftMalformedStreamDefinitionException()
self.mtd.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.tde = Exception.ttypes.ThriftStreamDefinitionException()
self.tde.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.se = Exception.ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('defineStream_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.ade is not None:
oprot.writeFieldBegin('ade', TType.STRUCT, 1)
self.ade.write(oprot)
oprot.writeFieldEnd()
if self.mtd is not None:
oprot.writeFieldBegin('mtd', TType.STRUCT, 2)
self.mtd.write(oprot)
oprot.writeFieldEnd()
if self.tde is not None:
oprot.writeFieldBegin('tde', TType.STRUCT, 3)
self.tde.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 4)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class findStreamId_args:
"""
Attributes:
- sessionId
- streamName
- streamVersion
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamName', None, None, ), # 2
(3, TType.STRING, 'streamVersion', None, None, ), # 3
)
def __init__(self, sessionId=None, streamName=None, streamVersion=None,):
self.sessionId = sessionId
self.streamName = streamName
self.streamVersion = streamVersion
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.streamVersion = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('findStreamId_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamName is not None:
oprot.writeFieldBegin('streamName', TType.STRING, 2)
oprot.writeString(self.streamName)
oprot.writeFieldEnd()
if self.streamVersion is not None:
oprot.writeFieldBegin('streamVersion', TType.STRING, 3)
oprot.writeString(self.streamVersion)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class findStreamId_result:
"""
Attributes:
- success
- tnde
- se
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'tnde', (Exception.ttypes.ThriftNoStreamDefinitionExistException, Exception.ttypes.ThriftNoStreamDefinitionExistException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'se', (Exception.ttypes.ThriftSessionExpiredException, Exception.ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, tnde=None, se=None,):
self.success = success
self.tnde = tnde
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.tnde = Exception.ttypes.ThriftNoStreamDefinitionExistException()
self.tnde.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.se = Exception.ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('findStreamId_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.tnde is not None:
oprot.writeFieldBegin('tnde', TType.STRUCT, 1)
self.tnde.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 2)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class publish_args:
"""
Attributes:
- eventBundle
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'eventBundle', (Data.ttypes.ThriftEventBundle, Data.ttypes.ThriftEventBundle.thrift_spec), None, ), # 1
)
def __init__(self, eventBundle=None,):
self.eventBundle = eventBundle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.eventBundle = Data.ttypes.ThriftEventBundle()
self.eventBundle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('publish_args')
if self.eventBundle is not None:
oprot.writeFieldBegin('eventBundle', TType.STRUCT, 1)
self.eventBundle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class publish_result:
"""
Attributes:
- ue
- se
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ue', (Exception.ttypes.ThriftUndefinedEventTypeException, Exception.ttypes.ThriftUndefinedEventTypeException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'se', (Exception.ttypes.ThriftSessionExpiredException, Exception.ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 2
)
def __init__(self, ue=None, se=None,):
self.ue = ue
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ue = Exception.ttypes.ThriftUndefinedEventTypeException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.se = Exception.ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('publish_result')
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 1)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 2)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamById_args:
"""
Attributes:
- sessionId
- streamId
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamId', None, None, ), # 2
)
def __init__(self, sessionId=None, streamId=None,):
self.sessionId = sessionId
self.streamId = streamId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamId = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamById_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamId is not None:
oprot.writeFieldBegin('streamId', TType.STRING, 2)
oprot.writeString(self.streamId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamById_result:
"""
Attributes:
- success
- se
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'se', (Exception.ttypes.ThriftSessionExpiredException, Exception.ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, se=None,):
self.success = success
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.se = Exception.ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamById_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 1)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamByNameVersion_args:
"""
Attributes:
- sessionId
- streamName
- streamVersion
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamName', None, None, ), # 2
(3, TType.STRING, 'streamVersion', None, None, ), # 3
)
def __init__(self, sessionId=None, streamName=None, streamVersion=None,):
self.sessionId = sessionId
self.streamName = streamName
self.streamVersion = streamVersion
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.streamVersion = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamByNameVersion_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamName is not None:
oprot.writeFieldBegin('streamName', TType.STRING, 2)
oprot.writeString(self.streamName)
oprot.writeFieldEnd()
if self.streamVersion is not None:
oprot.writeFieldBegin('streamVersion', TType.STRING, 3)
oprot.writeString(self.streamVersion)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamByNameVersion_result:
"""
Attributes:
- success
- se
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'se', (Exception.ttypes.ThriftSessionExpiredException, Exception.ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, se=None,):
self.success = success
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.se = Exception.ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamByNameVersion_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 1)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
py | b40fd082cab8c876c40e3e99357e4809058a80eb | #!/usr/bin/python3
from ftplib import FTP
def returnDefault():
try:
ftp = FTP(host, timeout=100)
ftp.login(username, password)
ftp.set_debuglevel(2)
print("login succesfull on host:{} with {}:{}".format(host, username, password))
# print("[*] Running nlst...")
print("[+] Welcome")
print(ftp.getwelcome())
print("[+] Setting passive mode")
# ftp.set_pasv(True)
print("[+] sencmd LIST")
# print(ftp.sendcmd('LIST'))
# print(ftp.retrlines('LIST'))
# print(ftp.dir())
# ftp.nlst()
print("[+] Doing nlst")
dirList = ftp.nlst()
print("[*] NLST Done: files {}".format(dirList))
ftp.quit()
except Exception as e:
# If connecting or logging in failed, `ftp` may not be usable here, so skip
# ftp.quit() in the error path; just report the failure and return an empty list.
print(e)
print('[-] Could not list directory contents.')
print('[-] Skipping to next target.')
return []
retList = []
for filename in dirList:
fn = filename.lower()
if '.php' in fn or '.htm' in fn or '.html' in fn or '.asp' in fn:
print ('[+] Found default page: {}'.format(filename))
retList.append(filename)
return retList
host = '192.168.10.6'
username = 'user1'
password = 'user1#!!'
returnDefault()
print("Closing connection. Script Completed.")
#here is how i made this script working
#https://stackoverflow.com/questions/58575896/python-fptlib-mlsd-generator-iteration-error |
py | b40fd0945ff62fceb5dc1a285e407dcb104ac3dd | """
Main module. Imports some functions to expose at the top level.
"""
from easytensor.auth import get_auth_token
from easytensor.urls import set_base_url
from easytensor.constants import Framework
from easytensor import tensorflow
from easytensor import pytorch
from easytensor import transformers
|
py | b40fd0d1e1035f2e283dc8f1a19a547a18e20f2a | import sys
import logging
import logging.handlers
import os
LOGGER_NAME = "Log"
LOG_DIR = '.log'
def setup_logging(logger_name = LOGGER_NAME, logger_file_name = '%s_log.log'%(LOGGER_NAME), logger_directory = LOG_DIR):
max_log_file_size = 500 * 1024 # 500 KB
max_backup_count = 10
if not os.path.isdir(logger_directory):
os.makedirs(logger_directory)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s | %(pathname)s:%(lineno)d | %(funcName)s | %(levelname)s | %(message)s ')
# Log to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# Log to file
log_path = os.path.join(logger_directory, logger_file_name)
rotating_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=max_log_file_size, backupCount=max_backup_count)
rotating_handler.setLevel(logging.DEBUG)
rotating_handler.setFormatter(formatter)
logger.addHandler(rotating_handler)
setup_logging()
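# A minimal usage sketch (added illustration, not part of the original module):
# once setup_logging() has run, any caller can fetch the configured logger by name.
# The __main__ guard keeps the demo from firing when this file is merely imported.
if __name__ == "__main__":
    demo_logger = logging.getLogger(LOGGER_NAME)
    demo_logger.info("Logging configured: console + rotating file under %s", LOG_DIR)
    demo_logger.debug("DEBUG-level messages are emitted to both handlers.")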
|
py | b40fd21a74c17ca82c000c94bcdc2b7e09fcc0e6 | import logging
from collections import defaultdict
import angr
import archinfo
from archinfo.arch_arm import is_arm_arch
import claripy
import networkx
from . import Analysis
from .cfg.cfg_job_base import BlockID, FunctionKey, CFGJobBase
from .cfg.cfg_utils import CFGUtils
from .forward_analysis import ForwardAnalysis
from .. import sim_options
from ..engines.procedure import ProcedureMixin
from ..engines import SimSuccessors
from ..errors import AngrDelayJobNotice, AngrSkipJobNotice, AngrVFGError, AngrError, AngrVFGRestartAnalysisNotice, \
AngrJobMergingFailureNotice, SimValueError, SimIRSBError, SimError
from ..procedures import SIM_PROCEDURES
from ..state_plugins.callstack import CallStack
l = logging.getLogger(name=__name__)
class VFGJob(CFGJobBase):
"""
A job descriptor that contains local variables used during VFG analysis.
"""
def __init__(self, *args, **kwargs):
super(VFGJob, self).__init__(*args, **kwargs)
self.call_stack_suffix = None
self.vfg_node = None
self.is_call_jump = None
self.call_target = None
self.dbg_exit_status = {}
self.is_return_jump = None
self.sim_successors = None
# if this job has a call successor, do we plan to skip the call successor or not
self.call_skipped = False
# if the call is skipped, calling stack of the skipped function is saved in `call_context_key`
self.call_function_key = None # type: FunctionKey
self.call_task = None # type: CallAnalysis
@property
def block_id(self):
return self._block_id
def callstack_repr(self, kb=None):
s = [ ]
for i in range(0, len(self.call_stack_suffix), 2):
call_site, func_addr = self.call_stack_suffix[i], self.call_stack_suffix[i + 1] # pylint:disable=unsubscriptable-object
if func_addr is None:
continue
call_site_str = "%#x" % call_site if call_site is not None else "None"
if func_addr in kb.functions:
s.append("%s[%s]" % (kb.functions[func_addr].name, call_site_str))
else:
s.append("%#x[%s]" % (func_addr, call_site_str))
return "//".join(s)
class PendingJob(object):
__slots__ = ('block_id', 'state', 'call_stack', 'src_block_id', 'src_stmt_idx', 'src_ins_addr', )
def __init__(self, block_id, state, call_stack, src_block_id, src_stmt_idx, src_ins_addr):
self.block_id = block_id
self.state = state
self.call_stack = call_stack
self.src_block_id = src_block_id
self.src_stmt_idx = src_stmt_idx
self.src_ins_addr = src_ins_addr
class AnalysisTask(object):
"""
An analysis task describes a task that should be done before popping this task out of the task stack and discarding it.
"""
def __init__(self):
pass
@property
def done(self):
raise NotImplementedError()
class FunctionAnalysis(AnalysisTask):
"""
Analyze a function, generate fix-point states from all endpoints of that function, and then merge them into one state.
"""
def __init__(self, function_address, return_address):
super(FunctionAnalysis, self).__init__()
self.function_address = function_address
self.return_address = return_address
self.call_analysis = None
# tracks all jobs that are live currently
self.jobs = [ ]
def __repr__(self):
s = "<Function @ %#08x with %d jobs>" % (self.function_address, len(self.jobs))
return s
#
# Properties
#
@property
def done(self):
return not self.jobs
class CallAnalysis(AnalysisTask):
"""
Analyze a call by analyzing all functions this call might be calling, collecting all final states generated by
analyzing those functions, and merging them into one state.
"""
def __init__(self, address, return_address, function_analysis_tasks=None, mergeable_plugins=None):
super(CallAnalysis, self).__init__()
self.address = address
self.return_address = return_address
self.function_analysis_tasks = function_analysis_tasks if function_analysis_tasks is not None else [ ]
self._mergeable_plugins = mergeable_plugins
self.skipped = False
self._final_jobs = [ ]
def __repr__(self):
s = "<Call @ %#08x with %d function tasks>" % (self.address, len(self.function_analysis_tasks))
return s
#
# Properties
#
@property
def done(self):
for task in self.function_analysis_tasks:
if not task.done:
return False
return True
#
# Public methods
#
def register_function_analysis(self, task):
assert isinstance(task, FunctionAnalysis)
self.function_analysis_tasks.append(task)
task.call_analysis = self
def add_final_job(self, job):
self._final_jobs.append(job)
def merge_jobs(self):
assert self._final_jobs
job = self._final_jobs[0]
for other in self._final_jobs[1:]:
job.state = job.state.merge(other.state, plugin_whitelist=self._mergeable_plugins)[0]
return job
class VFGNode(object):
"""
A descriptor of nodes in a Value-Flow Graph
"""
def __init__(self, addr, key, state=None):
"""
Constructor.
:param int addr:
:param BlockID key:
:param SimState state:
"""
self.key = key
self.addr = addr
self.state = None
self.widened_state = None
self.narrowing_times = 0
self.all_states = [ ]
self.events = [ ]
self.input_variables = [ ]
self.actions = [ ]
self.final_states = [ ]
if state:
self.all_states.append(state)
self.state = state
def __hash__(self):
return hash(self.key)
def __eq__(self, o):
return (type(self) == type(o) and # pylint:disable=unidiomatic-typecheck
self.key == o.key and self.addr == o.addr and
self.state == o.state and self.actions == o.actions and
self.events == o.events and self.narrowing_times == o.narrowing_times and
self.all_states == o.all_states and self.widened_state == o.widened_state and
self.input_variables == o.input_variables)
def __repr__(self):
s = "VFGNode[%#x] <%s>" % (self.addr, repr(self.key))
return s
def append_state(self, s, is_widened_state=False):
"""
Append a new state to this VFGNode.
:param s: The new state to append
:param is_widened_state: Whether it is a widened state or not.
"""
if not is_widened_state:
self.all_states.append(s)
self.state = s
else:
self.widened_state = s
class VFG(ForwardAnalysis, Analysis): # pylint:disable=abstract-method
"""
This class represents a control-flow graph with static analysis result.
Perform abstract interpretation analysis starting from the given function address. The output is an invariant at
the beginning (or the end) of each basic block.
Steps:
- Generate a CFG first if CFG is not provided.
- Identify all merge points (denote the set of merge points as Pw) in the CFG.
- Cut those loop back edges (can be derived from Pw) so that we gain an acyclic CFG.
- Identify all variables that are 1) from memory loading 2) from initial values, or 3) phi functions. Denote
the set of those variables as S_{var}.
- Start real AI analysis and try to compute a fix point of each merge point. Perform widening/narrowing only on
variables \\in S_{var}.
"""
# TODO: right now the graph traversal method is not optimal. A new solution is needed to minimize the iteration we
# TODO: access each node in the graph
def __init__(self,
cfg=None,
context_sensitivity_level=2,
start=None,
function_start=None,
interfunction_level=0,
initial_state=None,
avoid_runs=None,
remove_options=None,
timeout=None,
max_iterations_before_widening=8,
max_iterations=40,
widening_interval=3,
final_state_callback=None,
status_callback=None,
record_function_final_states=False
):
"""
:param cfg: The control-flow graph to base this analysis on. If none is provided, we will
construct a CFGEmulated.
:param context_sensitivity_level: The level of context-sensitivity of this VFG.
It ranges from 0 to infinity. Default 2.
:param function_start: The address of the function to analyze.
:param interfunction_level: The level of interfunction analysis, i.e., how many levels of calls into other functions to follow.
:param initial_state: A state to use as the initial one
:param avoid_runs: A list of runs to avoid
:param remove_options: State options to remove from the initial state. It only works when `initial_state` is
None
:param int timeout:
"""
ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=True,
status_callback=status_callback
)
# Related CFG.
# We can still perform analysis if you don't specify a CFG. But providing a CFG may give you better result.
self._cfg = cfg
# Where to start the analysis
self._start = start if start is not None else self.project.entry
self._function_start = function_start if function_start is not None else self._start
# Other parameters
self._avoid_runs = [ ] if avoid_runs is None else avoid_runs
self._context_sensitivity_level = context_sensitivity_level
self._interfunction_level = interfunction_level
self._state_options_to_remove = set() if remove_options is None else remove_options
self._timeout = timeout
self._start_at_function = self._start == self._function_start
self._initial_state = initial_state
self._max_iterations_before_widening = max_iterations_before_widening
self._max_iterations = max_iterations
self._widening_interval = widening_interval
self._final_state_callback = final_state_callback
self._record_function_final_states = record_function_final_states
self._nodes = {} # all the vfg nodes, keyed on block IDs
self._normal_states = { } # Last available state for each program point without widening
self._widened_states = { } # States on which widening has occurred
# Initial states of each function, which is context sensitive
# It maps function key to its states
self._function_initial_states = defaultdict(dict)
# Final states of each function, right after `ret` is called. Also context sensitive.
# even if a function may have multiple return sites, as long as they all return to the same place, there is
# only one final state of that function.
self._function_final_states = defaultdict(dict)
# All final states are put in this list
self.final_states = [ ]
self._state_initialization_map = defaultdict(list)
self._exit_targets = defaultdict(list) # A dict to log edges and the jumpkind between each basic block
# A dict to record all blocks that returns to a specific address
self._return_target_sources = defaultdict(list)
self._pending_returns = {}
self._thumb_addrs = set() # set of all addresses that are code in thumb mode
self._final_address = None # Address of the very last instruction. The analysis is terminated there.
self._function_merge_points = {}
self._function_widening_points = {}
self._function_node_addrs = {} # sorted in reverse post-order
self._mergeable_plugins = ('memory', 'registers')
self._task_stack = [ ]
self._tracing_times = defaultdict(int)
# counters for debugging
self._execution_counter = defaultdict(int)
# Start analysis
self._analyze()
#
# Internal properties
#
@property
def _current_function_address(self):
return self._task_stack[-1].function_address
@property
def _top_task(self):
"""
Get the task at the top of the stack.
:return: The top task in the stack, or None if the stack is empty.
:rtype: AnalysisTask
"""
if not self._task_stack:
return None
return self._task_stack[-1]
@property
def _top_function_analysis_task(self):
"""
Get the topmost FunctionAnalysis task on the stack.
:return: The top function analysis task in the stack, or None if there isn't any.
:rtype: FunctionAnalysis
"""
for r in reversed(self._task_stack):
if isinstance(r, FunctionAnalysis):
return r
return None
@property
def function_initial_states(self):
return self._function_initial_states
@property
def function_final_states(self):
return self._function_final_states
#
# Public methods
#
def get_any_node(self, addr):
"""
Get any VFG node corresponding to the basic block at @addr.
Note that depending on the context sensitivity level, there might be
multiple nodes corresponding to different contexts. This function will
return the first one it encounters, which might not be what you want.
"""
for n in self.graph.nodes():
if n.addr == addr:
return n
def irsb_from_node(self, node):
return self.project.factory.successors(node.state, addr=node.addr)
#
# Operations
#
def copy(self):
new_vfg = VFG(self.project)
new_vfg._cfg = self._cfg
new_vfg._graph = networkx.DiGraph(self.graph)
new_vfg._nodes = self._nodes.copy()
new_vfg._exit_targets = defaultdict(list, self._exit_targets)
return new_vfg
# Pickling helpers
def __setstate__(self, s):
self.__dict__.update(s)
def __getstate__(self):
return dict(self.__dict__)
#
# Main analysis routines, mostly overriding methods of ForwardAnalysis
#
def _pre_analysis(self):
"""
Executed before analysis starts. Necessary initializations are performed here.
:return: None
"""
l.debug("Starting from %#x", self._start)
# initialize the task stack
self._task_stack = [ ]
# initialize the execution counter dict
self._execution_counter = defaultdict(int)
# Generate a CFG if no CFG is provided
if not self._cfg:
l.debug("Generating a CFG, since none was given...")
# TODO: can we use a fast CFG instead? note that fast CFG does not care about context sensitivity at all, but
# TODO: for state merging, we also don't really care about context sensitivity.
self._cfg = self.project.analyses.CFGEmulated(context_sensitivity_level=self._context_sensitivity_level,
starts=(self._start,)
)
if not self._cfg.normalized:
l.warning("The given CFG is not normalized, which might impact the performance/accuracy of the VFG "
"analysis.")
# Prepare the state
initial_state = self._prepare_initial_state(self._start, self._initial_state)
initial_state.ip = self._start
if self.project.arch.name.startswith('MIPS'):
initial_state.regs.t9 = self._start
# clear function merge points cache
self._function_merge_points = {}
# Create the initial state
state = initial_state.copy()
if self._start_at_function:
# set the return address to an address so we can catch it and terminate the VSA analysis
# TODO: Properly pick an address that will not conflict with any existing code and data in the program
self._final_address = 0x4fff0000
self._set_return_address(state, self._final_address)
call_stack = None
if not self._start_at_function:
# we should build a custom call stack
call_stack = CallStack()
call_stack = call_stack.call(None, self._function_start, retn_target=self._final_address)
job = VFGJob(state.addr, state, self._context_sensitivity_level,
jumpkind='Ijk_Boring', final_return_address=self._final_address,
call_stack=call_stack
)
block_id = BlockID.new(state.addr, job.get_call_stack_suffix(), job.jumpkind)
job._block_id = block_id
self._insert_job(job)
# create the task
function_analysis_task = FunctionAnalysis(self._function_start, self._final_address)
function_analysis_task.jobs.append(job)
self._task_stack.append(function_analysis_task)
def _job_sorting_key(self, job):
"""
Get the sorting key of a VFGJob instance.
:param VFGJob job: the VFGJob object.
:return: An integer that determines the order of this job in the queue.
:rtype: int
"""
MAX_BLOCKS_PER_FUNCTION = 1000000
task_functions = list(reversed(
list(task.function_address for task in self._task_stack if isinstance(task, FunctionAnalysis))
))
try:
function_pos = task_functions.index(job.func_addr)
except ValueError:
# not in the list
# it might be because we followed the wrong path, or there is a bug in the traversal algorithm
# anyways, do it first
l.warning('Function address %#x is not found in task stack.', job.func_addr)
return 0
try:
block_in_function_pos = self._ordered_node_addrs(job.func_addr).index(job.addr)
except ValueError:
# block not found. what?
block_in_function_pos = min(job.addr - job.func_addr, MAX_BLOCKS_PER_FUNCTION - 1)
return block_in_function_pos + MAX_BLOCKS_PER_FUNCTION * function_pos
# return self._cfg.get_topological_order(self._cfg.get_node(job.block_id))
def _job_key(self, job):
"""
Return the block ID of the job. Two or more jobs owning the same block ID will be merged together.
:param VFGJob job: The VFGJob instance.
:return: The block ID of the job
:rtype: BlockID
"""
return job.block_id
def _pre_job_handling(self, job):
"""
Some code executed before actually processing the job.
:param VFGJob job: the VFGJob object.
:return: None
"""
# did we reach the final address?
if self._final_address is not None and job.addr == self._final_address:
# our analysis should be terminated here
l.debug("%s is viewed as a final state. Skip.", job)
raise AngrSkipJobNotice()
l.debug("Handling VFGJob %s", job)
if not self._top_task:
l.debug("No more tasks available. Skip the job.")
raise AngrSkipJobNotice()
assert isinstance(self._top_task, FunctionAnalysis)
if job not in self._top_task.jobs:
# it seems that all jobs of the top task have been done. unwind the task stack
# make sure this job is at least recorded somewhere
unwind_count = None
for i, task in enumerate(reversed(self._task_stack)):
if isinstance(task, FunctionAnalysis):
if job in task.jobs:
# nice
unwind_count = i
if unwind_count is None:
l.debug("%s is not recorded. Skip the job.", job)
raise AngrSkipJobNotice()
else:
# unwind the stack till the target, unless we see any pending jobs for each new top task
for i in range(unwind_count):
if isinstance(self._top_task, FunctionAnalysis):
# are there any pending job belonging to the current function that we should handle first?
pending_job_key = self._get_pending_job(self._top_task.function_address)
if pending_job_key is not None:
# ah there is
# analyze it first
self._trace_pending_job(pending_job_key)
l.debug("A pending job is found for function %#x. Delay %s.",
self._top_task.function_address, job)
raise AngrDelayJobNotice()
task = self._task_stack.pop()
if not task.done:
l.warning("Removing an unfinished task %s. Might be a bug.", task)
assert job in self._top_task.jobs
# check if this is considered to be a final state
if self._final_state_callback is not None and self._final_state_callback(job.state, job.call_stack):
l.debug("%s.state is considered as a final state. Skip the job.", job)
self.final_states.append(job.state)
raise AngrSkipJobNotice()
# increment the execution counter
self._execution_counter[job.addr] += 1
self._top_task.jobs.remove(job)
# set up some essential variables and parameters
job.call_stack_suffix = job.get_call_stack_suffix()
job.jumpkind = 'Ijk_Boring' if job.state.history.jumpkind is None else \
job.state.history.jumpkind
src_block_id = job.src_block_id
src_exit_stmt_idx = job.src_exit_stmt_idx
addr = job.state.solver.eval(job.state.regs.ip)
input_state = job.state
block_id = BlockID.new(addr, job.call_stack_suffix, job.jumpkind)
if self._tracing_times[block_id] > self._max_iterations:
l.debug('%s has been traced too many times. Skip', job)
raise AngrSkipJobNotice()
self._tracing_times[block_id] += 1
if block_id not in self._nodes:
vfg_node = VFGNode(addr, block_id, state=input_state)
self._nodes[block_id] = vfg_node
else:
vfg_node = self._nodes[block_id]
job.vfg_node = vfg_node
# log the current state
vfg_node.state = input_state
# Execute this basic block with input state, and get a new SimSuccessors instance
# unused result var is `error_occured`
job.sim_successors, _, restart_analysis = self._get_simsuccessors(input_state, addr)
if restart_analysis:
# We should restart the analysis because something must be changed in the very initial state
raise AngrVFGRestartAnalysisNotice()
if job.sim_successors is None:
# Ouch, we cannot get the SimSuccessors for some reason
# Skip this guy
l.debug('Cannot create SimSuccessors for %s. Skip.', job)
raise AngrSkipJobNotice()
self._graph_add_edge(src_block_id,
block_id,
jumpkind=job.jumpkind,
src_exit_stmt_idx=src_exit_stmt_idx
)
def _get_successors(self, job):
# Extract initial values
state = job.state
addr = job.addr
# Obtain successors
if addr not in self._avoid_runs:
all_successors = job.sim_successors.flat_successors + job.sim_successors.unconstrained_successors
else:
all_successors = []
# save those states
job.vfg_node.final_states = all_successors[:]
# Update thumb_addrs
if job.sim_successors.sort == 'IRSB' and state.thumb:
self._thumb_addrs.update(job.sim_successors.artifacts['insn_addrs'])
if not all_successors:
if job.sim_successors.sort == 'SimProcedure' and isinstance(job.sim_successors.artifacts['procedure'],
SIM_PROCEDURES["stubs"]["PathTerminator"]):
# If there is no valid exit in this branch and it's not
# intentional (e.g. caused by a SimProcedure that does not
# do_return) , we should make it return to its callsite.
# However, we don't want to use its state as it might be
# corrupted. Just create a link in the exit_targets map.
retn_target = job.call_stack.current_return_target
if retn_target is not None:
new_call_stack = job.call_stack_copy()
exit_target_tpl = new_call_stack.stack_suffix(self._context_sensitivity_level) + (retn_target,)
self._exit_targets[job.call_stack_suffix + (addr,)].append(
(exit_target_tpl, 'Ijk_Ret'))
else:
# This is intentional. We shall remove all the pending returns generated before along this path.
self._remove_pending_return(job, self._pending_returns)
# If this is a call exit, we shouldn't put the default exit (which
# is artificial) into the CFG. The exits will be Ijk_Call and
# Ijk_FakeRet, and Ijk_Call always goes first
job.is_call_jump = any([self._is_call_jumpkind(i.history.jumpkind) for i in all_successors])
call_targets = [i.solver.eval_one(i.ip) for i in all_successors if self._is_call_jumpkind(i.history.jumpkind)]
job.call_target = None if not call_targets else call_targets[0]
job.is_return_jump = len(all_successors) and all_successors[0].history.jumpkind == 'Ijk_Ret'
if job.is_call_jump:
# create the call task
# TODO: correctly fill the return address
call_task = CallAnalysis(job.addr, None, [ ], mergeable_plugins=self._mergeable_plugins)
self._task_stack.append(call_task)
job.call_task = call_task
return all_successors
def _handle_successor(self, job, successor, all_successors):
"""
Process each successor generated by the job, and return a new list of succeeding jobs.
:param VFGJob job: The VFGJob instance.
:param SimState successor: The succeeding state.
:param list all_successors: A list of all successors.
:return: A list of newly created jobs from the successor.
:rtype: list
"""
# Initialize parameters
addr = job.addr
jumpkind = successor.history.jumpkind
#
# Get instruction pointer
#
if job.is_return_jump:
ret_target = job.call_stack.current_return_target
if ret_target is None:
# We have nowhere to go according to our call stack. However, the callstack might be corrupted
l.debug("According to the call stack, we have nowhere to return to.")
return [ ]
successor.ip = ret_target
# this try-except block is to handle cases where the instruction pointer is symbolic
try:
successor_addrs = successor.solver.eval_upto(successor.ip, 2)
except SimValueError:
# TODO: Should fall back to reading targets from CFG
# It cannot be concretized currently. Maybe we could handle
# it later, maybe it just cannot be concretized
return [ ]
if len(successor_addrs) > 1:
# multiple concrete targets
if job.is_return_jump:
# It might be caused by state merging
# We may retrieve the correct ip from call stack
successor.ip = job.call_stack.current_return_target
else:
return self._handle_successor_multitargets(job, successor, all_successors)
# Now there should be one single target for the successor
successor_addr = successor.solver.eval_one(successor.ip)
# Get the fake ret successor
fakeret_successor = None
if self._is_call_jumpkind(jumpkind):
fakeret_successor = all_successors[-1]
# If the function we're calling into doesn't return, we should discard it
if self._cfg is not None:
func = self.kb.functions.function(addr=job.call_target)
if func is not None and func.returning is False and len(all_successors) == 2:
del all_successors[-1]
fakeret_successor = None
if self._is_call_jumpkind(jumpkind):
# Create a new call stack for the successor
new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
if new_call_stack is None:
l.debug("Cannot create a new callstack for address %#x", successor_addr)
job.dbg_exit_status[successor] = ""
return [ ]
new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
new_function_key = FunctionKey.new(successor_addr, new_call_stack_suffix)
# Save the initial state for the function
self._save_function_initial_state(new_function_key, successor_addr, successor.copy())
# bail out if we hit the interfunction_level cap
if len(job.call_stack) >= self._interfunction_level:
l.debug('We are not tracing into a new function %#08x as we hit interfunction_level limit', successor_addr)
# mark it as skipped
job.dbg_exit_status[successor] = "Skipped"
job.call_skipped = True
job.call_function_key = new_function_key
job.call_task.skipped = True
return [ ]
elif jumpkind == 'Ijk_Ret':
# Pop the current function out from the call stack
new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
if new_call_stack is None:
l.debug("Cannot create a new callstack for address %#x", successor_addr)
job.dbg_exit_status[successor] = ""
return [ ]
new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
else:
new_call_stack = job.call_stack
new_call_stack_suffix = job.call_stack_suffix
# Generate the new block ID
new_block_id = BlockID.new(successor_addr, new_call_stack_suffix, jumpkind)
#
# Generate new VFG jobs
#
if jumpkind == "Ijk_Ret":
assert not job.is_call_jump
# Record this return
self._return_target_sources[successor_addr].append(job.call_stack_suffix + (addr,))
# Check if this return is inside our pending returns list
if new_block_id in self._pending_returns:
del self._pending_returns[new_block_id]
# Check if we have reached a fix-point
if jumpkind != 'Ijk_FakeRet' and \
new_block_id in self._nodes:
last_state = self._nodes[new_block_id].state
_, _, merged = last_state.merge(successor, plugin_whitelist=self._mergeable_plugins)
if merged:
l.debug("%s didn't reach a fix-point", new_block_id)
else:
l.debug("%s reaches a fix-point.", new_block_id)
job.dbg_exit_status[successor] = "Merged due to reaching a fix-point"
return [ ]
new_jobs = self._create_new_jobs(job, successor, new_block_id, new_call_stack)
return new_jobs
def _handle_successor_multitargets(self, job, successor, all_successors):
"""
Generate new jobs for all possible successor targets when there are more than one possible concrete value for
successor.ip
:param VFGJob job: The VFGJob instance.
:param SimState successor: The succeeding state.
:param list all_successors: All succeeding states from the same VFGJob.
:return: A list of new succeeding jobs
:rtype: list
"""
new_jobs = [ ]
# Currently we assume a legit jumping target cannot have more than 256 concrete values
# TODO: make it a setting on VFG
MAX_NUMBER_OF_CONCRETE_VALUES = 256
all_possible_ips = successor.solver.eval_upto(successor.ip, MAX_NUMBER_OF_CONCRETE_VALUES + 1)
if len(all_possible_ips) > MAX_NUMBER_OF_CONCRETE_VALUES:
l.warning("IP can be concretized to more than %d values, which means it might be corrupted.",
MAX_NUMBER_OF_CONCRETE_VALUES)
return [ ]
# Call this function to generate a successor for each possible IP
for ip in all_possible_ips:
concrete_successor = successor.copy()
concrete_successor.ip = ip
concrete_jobs = self._handle_successor(job, concrete_successor, all_successors)
if job.is_call_jump: # TODO: take care of syscalls
for new_job in concrete_jobs:
# TODO: correctly fill the return address. The return address can be found from the
# TODO: fakeret successor in the `successors` list
function_analysis_task = FunctionAnalysis(new_job.addr, None)
# log the new job
function_analysis_task.jobs.append(new_job)
# put it onto the stack
self._task_stack.append(function_analysis_task)
# log it in the call_task
job.call_task.register_function_analysis(function_analysis_task)
new_jobs.extend(concrete_jobs)
return new_jobs
def _post_job_handling(self, job, new_jobs, successors): # pylint:disable=unused-argument
# Debugging output
if l.level == logging.DEBUG:
self._post_job_handling_debug(job, successors)
# pop all finished tasks from the task stack
pending_task_func_addrs = set(k.func_addr for k in self._pending_returns.keys())
while True:
task = self._top_task
if task is None:
# the task stack is empty
break
if isinstance(task, CallAnalysis):
# the call never returns
if task.skipped:
l.debug("Calls from %s are skipped.", task)
else:
l.debug('%s never returns.', task)
self._task_stack.pop()
else:
if not task.done or task.function_address in pending_task_func_addrs:
break
else:
l.debug('%s is finished.', task)
self._task_stack.pop()
# the next guy *might be* a call analysis task
task = self._top_task
if isinstance(task, CallAnalysis):
if task.done:
# awesome!
# pop it from the task stack
self._task_stack.pop()
if task._final_jobs:
# merge all jobs, and create a new job
new_job = task.merge_jobs()
# register the job to the top task
self._top_task.jobs.append(new_job)
# insert the job
self._insert_job(new_job)
#if not new_jobs:
# # task stack is empty
# self.final_states.append(job.state)
def _intra_analysis(self):
pass
def _merge_jobs(self, *jobs):
l.debug("Merging jobs %s", jobs)
# there should not be more than two jobs being merged at the same time
assert len(jobs) == 2
addr = jobs[0].addr
if self.project.is_hooked(addr) and \
self.project.hooked_by(addr).is_continuation:
raise AngrJobMergingFailureNotice()
# update jobs
for job in jobs:
if job in self._top_function_analysis_task.jobs:
self._top_function_analysis_task.jobs.remove(job)
state_0 = jobs[0].state
state_1 = jobs[1].state
merged_state, _ = self._merge_states(state_0, state_1)
new_job = VFGJob(jobs[0].addr, merged_state, self._context_sensitivity_level, jumpkind=jobs[0].jumpkind,
block_id=jobs[0].block_id, call_stack=jobs[0].call_stack, src_block_id=jobs[0].src_block_id,
src_exit_stmt_idx=jobs[0].src_exit_stmt_idx, src_ins_addr=jobs[0].src_ins_addr,
)
self._top_function_analysis_task.jobs.append(new_job)
return new_job
def _should_widen_jobs(self, *jobs):
"""
:param iterable jobs:
:return: True if we should widen, False otherwise
:rtype: bool
"""
job_0, _ = jobs[-2:] # type: VFGJob
addr = job_0.addr
if addr not in self._widening_points(job_0.func_addr):
return False
tracing_times = self._tracing_times[job_0.block_id]
if tracing_times > self._max_iterations_before_widening and tracing_times % self._widening_interval == 0:
return True
return False
def _widen_jobs(self, *jobs):
"""
:param iterable jobs:
:return:
"""
job_0, job_1 = jobs[-2:] # type: VFGJob
# update jobs
for job in jobs:
if job in self._top_function_analysis_task.jobs:
self._top_function_analysis_task.jobs.remove(job)
l.debug("Widening %s", job_1)
new_state, _ = self._widen_states(job_0.state, job_1.state)
# print "job_0.state.eax =", job_0.state.regs.eax._model_vsa, "job_1.state.eax =", job_1.state.regs.eax._model_vsa
# print "new_job.state.eax =", new_state.regs.eax._model_vsa
new_job = VFGJob(jobs[0].addr, new_state, self._context_sensitivity_level, jumpkind=jobs[0].jumpkind,
block_id=jobs[0].block_id, call_stack=jobs[0].call_stack, src_block_id=jobs[0].src_block_id,
src_exit_stmt_idx=jobs[0].src_exit_stmt_idx, src_ins_addr=jobs[0].src_ins_addr,
)
self._top_function_analysis_task.jobs.append(new_job)
return new_job
def _job_queue_empty(self):
if self._pending_returns:
# We don't have any paths remaining. Let's pop a previously-missing return to
# process
top_task = self._top_task # type: FunctionAnalysis
func_addr = top_task.function_address
pending_ret_key = self._get_pending_job(func_addr)
if pending_ret_key is None:
# analysis of the current function is somehow terminated
# we have to rewind the stack, and try the function that calls the current function
l.debug('No pending return for the current function %#x. Unwind the stack.', func_addr)
if not self._top_function_analysis_task.done:
l.warning('The top function analysis task is not done yet. This might be a bug. '
'Please report to Fish.')
# stack unwinding
while True:
s = self._task_stack.pop()
if isinstance(s, CallAnalysis):
break
return self._job_queue_empty()
self._trace_pending_job(pending_ret_key)
l.debug("Tracing a missing return %s", repr(pending_ret_key))
def _post_analysis(self):
pass
#
# State widening, merging, and narrowing
#
def _merge_states(self, old_state, new_state):
"""
Merge two given states, and return a new one.
:param old_state:
:param new_state:
:returns: The merged state, and whether a merging has occurred
"""
# print old_state.dbg_print_stack()
# print new_state.dbg_print_stack()
merged_state, _, merging_occurred = old_state.merge(new_state, plugin_whitelist=self._mergeable_plugins)
# print "Merged: "
# print merged_state.dbg_print_stack()
return merged_state, merging_occurred
@staticmethod
def _widen_states(old_state, new_state):
"""
Perform widen operation on the given states, and return a new one.
:param old_state:
:param new_state:
:returns: The widened state, and whether widening has occurred
"""
# print old_state.dbg_print_stack()
# print new_state.dbg_print_stack()
l.debug('Widening state at IP %s', old_state.ip)
widened_state, widening_occurred = old_state.widen(new_state)
# print "Widened: "
# print widened_state.dbg_print_stack()
return widened_state, widening_occurred
@staticmethod
def _narrow_states(node, old_state, new_state, previously_widened_state): # pylint:disable=unused-argument,no-self-use
"""
Try to narrow the state!
:param old_state:
:param new_state:
:param previously_widened_state:
:returns: The narrowed state, and whether a narrowing has occurred
"""
l.debug('Narrowing state at IP %s', previously_widened_state.ip)
s = previously_widened_state.copy()
narrowing_occurred = False
# TODO: Finish the narrowing logic
return s, narrowing_occurred
#
# Helper methods
#
def _prepare_initial_state(self, function_start, state):
"""
Get the state to start the analysis for function.
:param int function_start: Address of the function
:param SimState state: The program state to base on.
"""
if state is None:
state = self.project.factory.blank_state(mode="static",
remove_options=self._state_options_to_remove
)
# make room for arguments passed to the function
sp = state.regs.sp
sp_val = state.solver.eval_one(sp)
state.memory.set_stack_address_mapping(sp_val,
state.memory.stack_id(function_start) + '_pre',
0
)
state.registers.store('sp', sp - 0x100)
# Set the stack address mapping for the initial stack
state.memory.set_stack_size(state.arch.stack_size)
initial_sp = state.solver.eval(state.regs.sp) # FIXME: This is bad, as it may lose tracking of multiple sp values
initial_sp -= state.arch.bytes
state.memory.set_stack_address_mapping(initial_sp,
state.memory.stack_id(function_start),
function_start
)
return state
def _set_return_address(self, state, ret_addr):
"""
Set the return address of the current state to a specific address. We assume we are at the beginning of a
function, or in other words, we are about to execute the very first instruction of the function.
:param SimState state: The program state
:param int ret_addr: The return address
:return: None
"""
# TODO: the following code is untested on architectures other than X86 and AMD64. Don't freak out if you find bugs :)
# TODO: Test it
ret_bvv = state.solver.BVV(ret_addr, self.project.arch.bits)
if self.project.arch.name in ('X86', 'AMD64'):
state.stack_push(ret_bvv)
elif is_arm_arch(self.project.arch):
state.regs.lr = ret_bvv
elif self.project.arch.name in ('MIPS32', 'MIPS64'):
state.regs.ra = ret_bvv
elif self.project.arch.name in ('PPC32', 'PPC64'):
state.regs.lr = ret_bvv
else:
l.warning('Return address cannot be set for architecture %s. Please add corresponding logic to '
'VFG._set_return_address().', self.project.arch.name
)
def _create_graph(self, return_target_sources=None):
"""
Create a DiGraph out of the existing edge map.
:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object
"""
if return_target_sources is None:
# We set it to a defaultdict in order to be consistent with the
# actual parameter.
return_target_sources = defaultdict(list)
cfg = networkx.DiGraph()
# The corner case: add a node to the graph if there is only one block
if len(self._nodes) == 1:
cfg.add_node(self._nodes[next(iter(self._nodes.keys()))])
# Adding edges
for tpl, targets in self._exit_targets.items():
basic_block = self._nodes[tpl] # Cannot fail :)
for ex, jumpkind in targets:
if ex in self._nodes:
target_bbl = self._nodes[ex]
cfg.add_edge(basic_block, target_bbl, jumpkind=jumpkind)
# Add edges for possibly missing returns
if basic_block.addr in return_target_sources:
for src_irsb_key in \
return_target_sources[basic_block.addr]:
cfg.add_edge(self._nodes[src_irsb_key],
basic_block, jumpkind="Ijk_Ret")
else:
# Debugging output
def addr_formalize(addr):
if addr is None:
return "None"
else:
return "%#08x" % addr
s = "(["
for addr in ex[:-1]:
s += addr_formalize(addr) + ", "
s += "] %s)" % addr_formalize(ex[-1])
l.warning("Key %s does not exist.", s)
return cfg
#
# DiGraph manipulation
#
def _graph_get_node(self, block_id, terminator_for_nonexistent_node=False):
"""
Get an existing VFGNode instance from the graph.
:param BlockID block_id: The block ID for the node to get.
:param bool terminator_for_nonexistent_node: True if a Terminator (which is a SimProcedure stub) should be
created when there is no existing node available for the given
block ID.
:return: A node in the graph, or None.
:rtype: VFGNode
"""
if block_id not in self._nodes:
l.error("Trying to look up a node that we don't have yet. Is this okay????")
if not terminator_for_nonexistent_node:
return None
# Generate a PathTerminator node
addr = block_id.addr
func_addr = block_id.func_addr
if func_addr is None:
# We'll have to use the current block address instead
# TODO: Is it really OK?
func_addr = addr
input_state = self.project.factory.entry_state()
input_state.ip = addr
pt = VFGNode(addr, block_id, input_state)
self._nodes[block_id] = pt
if isinstance(self.project.arch, archinfo.ArchARM) and addr % 2 == 1:
self._thumb_addrs.add(addr)
self._thumb_addrs.add(addr - 1)
l.debug("Block ID %s does not exist. Create a PathTerminator instead.",
repr(block_id))
return self._nodes[block_id]
def _graph_add_edge(self, src_block_id, dst_block_id, **kwargs):
"""
Add an edge onto the graph.
:param BlockID src_block_id: The block ID for source node.
:param BlockID dst_block_id: The block Id for destination node.
:param str jumpkind: The jumpkind of the edge.
:param exit_stmt_idx: ID of the statement in the source IRSB where this edge is created from. 'default'
refers to the default exit.
:return: None
"""
dst_node = self._graph_get_node(dst_block_id, terminator_for_nonexistent_node=True)
if src_block_id is None:
self.graph.add_node(dst_node)
else:
src_node = self._graph_get_node(src_block_id, terminator_for_nonexistent_node=True)
self.graph.add_edge(src_node, dst_node, **kwargs)
#
# Other methods
#
def _get_simsuccessors(self, state, addr):
error_occured = False
restart_analysis = False
jumpkind = 'Ijk_Boring'
if state.history.jumpkind:
jumpkind = state.history.jumpkind
try:
node = self._cfg.get_any_node(addr)
num_inst = None if node is None else len(node.instruction_addrs)
sim_successors = self.project.factory.successors(state, jumpkind=jumpkind, num_inst=num_inst)
except SimIRSBError as ex:
# It's a tragedy that we came across some instructions that VEX
# does not support. I'll create a terminating stub there
l.error("SimIRSBError occurred(%s). Creating a PathTerminator.", ex)
error_occured = True
inst = SIM_PROCEDURES["stubs"]["PathTerminator"](
state, self.project.arch)
sim_successors = SimEngineProcedure().process(state, inst)
except claripy.ClaripyError:
l.error("ClaripyError: ", exc_info=True)
error_occured = True
# Generate a PathTerminator to terminate the current path
inst = SIM_PROCEDURES["stubs"]["PathTerminator"](
state, self.project.arch)
sim_successors = SimEngineProcedure().process(state, inst)
except SimError:
l.error("SimError: ", exc_info=True)
error_occured = True
# Generate a PathTerminator to terminate the current path
inst = SIM_PROCEDURES["stubs"]["PathTerminator"](
state, self.project.arch)
sim_successors = SimEngineProcedure().process(state, inst)
except AngrError as ex:
#segment = self.project.loader.main_object.in_which_segment(addr)
l.error("AngrError %s when generating SimSuccessors at %#x",
ex, addr, exc_info=True)
# We might be on a wrong branch, and is likely to encounter the
# "No bytes in memory xxx" exception
# Just ignore it
error_occured = True
sim_successors = None
return sim_successors, error_occured, restart_analysis
def _create_new_jobs(self, job, successor, new_block_id, new_call_stack):
"""
Create a list of new VFG jobs for the successor state.
:param VFGJob job: The VFGJob instance.
:param SimState successor: The succeeding state.
:param BlockID new_block_id: Block ID for the new VFGJob
:param new_call_stack: The new callstack.
:return: A list of newly created VFG jobs.
:rtype: list
"""
# TODO: basic block stack is probably useless
jumpkind = successor.history.jumpkind
stmt_idx = successor.scratch.stmt_idx
ins_addr = successor.scratch.ins_addr
# Make a copy of the state in case we use it later
successor_state = successor.copy()
successor_addr = successor_state.solver.eval(successor_state.ip)
new_jobs = [ ]
if jumpkind == "Ijk_FakeRet":
assert job.is_call_jump
# This is the default "fake" return successor generated at each call, if and only if the target function
# returns.
# if the call is skipped (for whatever reason, like we reached the interfunction tracing limit), we use
# this FakeRet successor as the final state of the function. Otherwise we save the FakeRet state in case the
# callee does not return normally, but don't process them right away.
# Clear the useless values (like return addresses, parameters) on stack if needed
if self._cfg is not None:
current_function = self.kb.functions.function(job.call_target)
if current_function is not None:
sp_difference = current_function.sp_delta
else:
sp_difference = 0
reg_sp_offset = successor_state.arch.sp_offset
reg_sp_expr = successor_state.registers.load(reg_sp_offset) + sp_difference
successor_state.registers.store(successor_state.arch.sp_offset, reg_sp_expr)
# Clear the return value with a TOP
top_si = successor_state.solver.TSI(successor_state.arch.bits)
successor_state.registers.store(successor_state.arch.ret_offset, top_si)
if job.call_skipped:
# TODO: Make sure the return values make sense
#if self.project.arch.name == 'X86':
# successor_state.regs.eax = successor_state.solver.BVS('ret_val', 32, min=0, max=0xffffffff, stride=1)
new_job = VFGJob(successor_addr,
successor_state,
self._context_sensitivity_level,
block_id=new_block_id,
jumpkind='Ijk_Ret',
call_stack=new_call_stack,
src_block_id=job.block_id,
src_exit_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
)
new_jobs.append(new_job)
assert isinstance(self._task_stack[-2], FunctionAnalysis)
self._task_stack[-2].jobs.append(new_job)
job.dbg_exit_status[successor] = "Pending"
else:
self._pending_returns[new_block_id] = PendingJob(new_block_id, successor_state, new_call_stack,
job.block_id, stmt_idx, ins_addr)
job.dbg_exit_status[successor] = "Pending"
else:
if sim_options.ABSTRACT_MEMORY in successor.options:
if self._is_call_jumpkind(successor.history.jumpkind):
# If this is a call, we create a new stack address mapping
reg_sp_si = self._create_stack_region(successor_state, successor_addr)
# Save the new sp register
new_reg_sp_expr = successor_state.solver.ValueSet(successor_state.arch.bits,
'global',
0,
reg_sp_si
)
successor_state.regs.sp = new_reg_sp_expr
elif successor.history.jumpkind == "Ijk_Ret":
# Remove the existing stack address mapping
# FIXME: Now we are assuming the sp is restored to its original value
reg_sp_expr = successor_state.regs.sp
if isinstance(reg_sp_expr._model_vsa, claripy.vsa.StridedInterval):
reg_sp_si = reg_sp_expr._model_vsa
reg_sp_val = reg_sp_si.min
elif isinstance(reg_sp_expr._model_vsa, claripy.vsa.ValueSet):
reg_sp_si = next(iter(reg_sp_expr._model_vsa.items()))[1]
reg_sp_val = reg_sp_si.min
# TODO: Finish it!
new_job = VFGJob(successor_addr,
successor_state,
self._context_sensitivity_level,
block_id=new_block_id,
jumpkind=successor_state.history.jumpkind,
call_stack=new_call_stack,
src_block_id=job.block_id,
src_exit_stmt_idx=stmt_idx,
src_ins_addr=ins_addr,
)
if successor.history.jumpkind == 'Ijk_Ret':
# it's returning to the return site
# save the state as a final state of the function that we are returning from
if self._record_function_final_states:
# key of the function that we are returning from
source_function_key = FunctionKey.new(job.func_addr,
job.call_stack_suffix
)
self._save_function_final_state(source_function_key, job.func_addr, successor_state)
                # TODO: add an assertion requiring that the return target is the same as the return address
                # TODO: we stored before
current_task = self._top_task
if current_task.call_analysis is not None:
current_task.call_analysis.add_final_job(new_job)
job.dbg_exit_status[successor] = "Appended to the call analysis task"
else:
job.dbg_exit_status[successor] = "Discarded (no call analysis task)"
else:
if self._is_call_jumpkind(successor.history.jumpkind):
# create a function analysis task
# TODO: the return address
task = FunctionAnalysis(new_job.addr, None)
self._task_stack.append(task)
# link it to the call analysis
job.call_task.register_function_analysis(task)
else:
task = self._top_task
# register the job to the function task
task.jobs.append(new_job)
# insert the new job into the new job array
new_jobs.append(new_job)
job.dbg_exit_status[successor] = "Appended"
if not job.is_call_jump or jumpkind != "Ijk_FakeRet":
new_target = (new_block_id, jumpkind)
else:
new_target = (new_block_id, "Ijk_FakeRet") # This is the fake return!
self._exit_targets[job.call_stack_suffix + (job.addr,)].append(new_target)
return new_jobs
def _remove_pending_return(self, job, pending_returns):
"""
Remove all pending returns that are related to the current job.
"""
# Build the tuples that we want to remove from the dict fake_func_retn_exits
tpls_to_remove = [ ]
call_stack_copy = job.call_stack_copy()
while call_stack_copy.current_return_target is not None:
ret_target = call_stack_copy.current_return_target
# Remove the current call stack frame
call_stack_copy = call_stack_copy.ret(ret_target)
call_stack_suffix = call_stack_copy.stack_suffix(self._context_sensitivity_level)
tpl = call_stack_suffix + (ret_target,)
tpls_to_remove.append(tpl)
# Remove those tuples from the dict
for tpl in tpls_to_remove:
if tpl in pending_returns:
del pending_returns[tpl]
l.debug("Removed (%s) from FakeExits dict.",
",".join([hex(i) if i is not None else 'None' for i in tpl]))
def _post_job_handling_debug(self, job, successors):
"""
Print out debugging information after handling a VFGJob and generating the succeeding jobs.
:param VFGJob job: The VFGJob instance.
:param list successors: A list of succeeding states.
:return: None
"""
func = self.project.loader.find_symbol(job.addr)
function_name = func.name if func is not None else None
module_name = self.project.loader.find_object_containing(job.addr).provides
l.debug("VFGJob @ %#08x with callstack [ %s ]", job.addr,
job.callstack_repr(self.kb),
)
l.debug("(Function %s of %s)", function_name, module_name)
l.debug("- is call jump: %s", job.is_call_jump)
for suc in successors:
if suc not in job.dbg_exit_status:
l.warning("- %s is not found. FIND OUT WHY.", suc)
continue
try:
l.debug("- successor: %#08x of %s [%s]", suc.solver.eval_one(suc.ip),
suc.history.jumpkind, job.dbg_exit_status[suc])
except SimValueError:
l.debug("- target cannot be concretized. %s [%s]", job.dbg_exit_status[suc], suc.history.jumpkind)
l.debug("Remaining/pending jobs: %d/%d", len(self._job_info_queue), len(self._pending_returns))
l.debug("Remaining jobs: %s", [ "%s %d" % (ent.job, id(ent.job)) for ent in self._job_info_queue])
l.debug("Task stack: %s", self._task_stack)
@staticmethod
def _is_call_jumpkind(jumpkind):
if jumpkind == 'Ijk_Call' or jumpkind.startswith('Ijk_Sys_'):
return True
return False
@staticmethod
def _is_return_jumpkind(jumpkind):
return jumpkind in ('Ijk_Ret', 'Ijk_FakeRet')
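    # Examples of the jumpkind strings handled by the two predicates above:
    # 'Ijk_Call' and system-call jumpkinds such as 'Ijk_Sys_syscall' are call
    # jumpkinds, 'Ijk_Ret' and 'Ijk_FakeRet' are return jumpkinds, and
    # 'Ijk_Boring' is neither.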
@staticmethod
def _create_stack_region(successor_state, successor_ip):
reg_sp_offset = successor_state.arch.sp_offset
reg_sp_expr = successor_state.registers.load(reg_sp_offset)
if type(reg_sp_expr._model_vsa) is claripy.BVV: # pylint:disable=unidiomatic-typecheck
reg_sp_val = successor_state.solver.eval(reg_sp_expr)
reg_sp_si = successor_state.solver.SI(to_conv=reg_sp_expr)
reg_sp_si = reg_sp_si._model_vsa
elif type(reg_sp_expr._model_vsa) is int: # pylint:disable=unidiomatic-typecheck
reg_sp_val = reg_sp_expr._model_vsa
reg_sp_si = successor_state.solver.SI(bits=successor_state.arch.bits, to_conv=reg_sp_val)
reg_sp_si = reg_sp_si._model_vsa
elif type(reg_sp_expr._model_vsa) is claripy.vsa.StridedInterval: # pylint:disable=unidiomatic-typecheck
reg_sp_si = reg_sp_expr._model_vsa
reg_sp_val = reg_sp_si.min
else:
reg_sp_si = next(iter(reg_sp_expr._model_vsa.items()))[1]
reg_sp_val = reg_sp_si.min
reg_sp_val = reg_sp_val - successor_state.arch.bytes # TODO: Is it OK?
new_stack_region_id = successor_state.memory.stack_id(successor_ip)
successor_state.memory.set_stack_address_mapping(reg_sp_val,
new_stack_region_id,
successor_ip)
return reg_sp_si
def _create_callstack(self, job, successor_ip, jumpkind, fakeret_successor):
addr = job.addr
if self._is_call_jumpkind(jumpkind):
new_call_stack = job.call_stack_copy()
            # Note that ARM has conditional branch-and-link instructions such as
            #   BLEQ <address>
            # which should give us three exits: Ijk_Call, Ijk_Boring, and
            # Ijk_Ret. The last exit is simulated.
            # Note: we assume the last exit is the simulated one.
if fakeret_successor is None:
retn_target_addr = None
else:
retn_target_addr = fakeret_successor.solver.eval_one(fakeret_successor.ip)
# Create call stack
new_call_stack = new_call_stack.call(addr, successor_ip,
retn_target=retn_target_addr)
elif jumpkind == "Ijk_Ret":
new_call_stack = job.call_stack_copy()
new_call_stack = new_call_stack.ret(successor_ip)
else:
# Normal control flow transition
new_call_stack = job.call_stack
return new_call_stack
def _save_function_initial_state(self, function_key, function_address, state):
"""
Save the initial state of a function, and merge it with existing ones if there are any.
:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
:param SimState state: Initial state of the function.
:return: None
"""
l.debug('Saving the initial state for function %#08x with function key %s',
function_address,
function_key
)
if function_key in self._function_initial_states[function_address]:
existing_state = self._function_initial_states[function_address][function_key]
merged_state, _, _ = existing_state.merge(state)
self._function_initial_states[function_address][function_key] = merged_state
else:
self._function_initial_states[function_address][function_key] = state
def _save_function_final_state(self, function_key, function_address, state):
"""
Save the final state of a function, and merge it with existing ones if there are any.
:param FunctionKey function_key: The key to this function.
:param int function_address: Address of the function.
        :param SimState state: Final state of the function.
:return: None
"""
l.debug('Saving the final state for function %#08x with function key %s',
function_address,
function_key
)
if function_key in self._function_final_states[function_address]:
existing_state = self._function_final_states[function_address][function_key]
merged_state = existing_state.merge(state, plugin_whitelist=self._mergeable_plugins)[0]
self._function_final_states[function_address][function_key] = merged_state
else:
self._function_final_states[function_address][function_key] = state
def _trace_pending_job(self, job_key):
pending_job = self._pending_returns.pop(job_key) # type: PendingJob
addr = job_key.addr
# Unlike CFG, we will still trace those blocks that have been traced before. In other words, we don't
# remove fake returns even if they have been traced - otherwise we cannot come to a fix-point.
block_id = BlockID.new(addr,
pending_job.call_stack.stack_suffix(self._context_sensitivity_level),
'Ijk_Ret'
)
job = VFGJob(addr,
pending_job.state,
self._context_sensitivity_level,
block_id=block_id,
jumpkind=pending_job.state.history.jumpkind,
call_stack=pending_job.call_stack,
src_block_id=pending_job.src_block_id,
src_exit_stmt_idx=pending_job.src_stmt_idx,
src_ins_addr=pending_job.src_ins_addr,
)
self._insert_job(job)
self._top_task.jobs.append(job)
def _get_pending_job(self, func_addr):
pending_ret_key = None
for k in self._pending_returns.keys(): # type: BlockID
if k.func_addr == func_addr:
pending_ret_key = k
break
return pending_ret_key
def _get_block_addr(self, b): #pylint:disable=R0201
if isinstance(b, SimSuccessors):
return b.addr
else:
raise Exception("Unsupported block type %s" % type(b))
def _get_nx_paths(self, begin, end):
"""
Get the possible (networkx) simple paths between two nodes or addresses
corresponding to nodes.
        :param begin: The starting address (int) or VFGNode instance.
        :param end:   The ending address (int) or VFGNode instance.
        :return:      A list of lists of nodes representing paths.
"""
if type(begin) is int and type(end) is int: # pylint:disable=unidiomatic-typecheck
n_begin = self.get_any_node(begin)
n_end = self.get_any_node(end)
elif isinstance(begin, VFGNode) and isinstance(end, VFGNode): # pylint:disable=unidiomatic-typecheck
n_begin = begin
n_end = end
else:
raise AngrVFGError("from and to should be of the same type")
return networkx.all_simple_paths(self.graph, n_begin, n_end)
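    # A hedged illustration of the helper above (the addresses are hypothetical, and
    # it assumes VFGNode exposes an ``addr`` attribute):
    #
    #   for path in vfg._get_nx_paths(0x400000, 0x400120):
    #       print([hex(node.addr) for node in path])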
def _merge_points(self, function_address):
"""
Return the ordered merge points for a specific function.
:param int function_address: Address of the querying function.
:return: A list of sorted merge points (addresses).
:rtype: list
"""
# we are entering a new function. now it's time to figure out how to optimally traverse the control flow
# graph by generating the sorted merge points
try:
new_function = self.kb.functions[function_address]
except KeyError:
# the function does not exist
return [ ]
if function_address not in self._function_merge_points:
ordered_merge_points = CFGUtils.find_merge_points(function_address, new_function.endpoints,
new_function.graph)
self._function_merge_points[function_address] = ordered_merge_points
return self._function_merge_points[function_address]
def _widening_points(self, function_address):
"""
Return the ordered widening points for a specific function.
:param int function_address: Address of the querying function.
        :return: A list of sorted widening points (addresses).
:rtype: list
"""
        # we are entering a new function. now it's time to figure out how to optimally traverse the control flow
        # graph by generating the sorted widening points
try:
new_function = self.kb.functions[function_address]
except KeyError:
# the function does not exist
return [ ]
if function_address not in self._function_widening_points:
if not new_function.normalized:
new_function.normalize()
widening_points = CFGUtils.find_widening_points(function_address, new_function.endpoints,
new_function.graph)
self._function_widening_points[function_address] = widening_points
return self._function_widening_points[function_address]
def _ordered_node_addrs(self, function_address):
"""
        For a given function, return the addresses of all of its nodes in an optimal traversal order. If the
        function does not exist, return an empty list.
:param int function_address: Address of the function.
        :return: An ordered list of node addresses.
:rtype: list
"""
try:
function = self.kb.functions[function_address]
except KeyError:
# the function does not exist
return [ ]
if function_address not in self._function_node_addrs:
sorted_nodes = CFGUtils.quasi_topological_sort_nodes(function.graph)
self._function_node_addrs[function_address] = [ n.addr for n in sorted_nodes ]
return self._function_node_addrs[function_address]
from angr.analyses import AnalysesHub
AnalysesHub.register_default('VFG', VFG)
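# A minimal usage sketch (kept as a comment; the binary path and keyword arguments
# below are illustrative assumptions, not defined in this module). Because the class
# is registered with AnalysesHub under the name 'VFG', it is reachable through a
# project's analysis hub:
#
#   import angr
#   proj = angr.Project("/path/to/binary", auto_load_libs=False)
#   vfg = proj.analyses.VFG(function_start=proj.entry, context_sensitivity_level=2)
#   print(len(vfg.graph.nodes()))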
|
py | b40fd25c230a22c96a5eba55101c36160951f863 | from machine import unique_id
from config import mqttHost, mqttSSL, mqttUser, mqttPass, mqttTopicRoot
ID = ''.join('{:02x}'.format(b) for b in unique_id())
def check_requirements():
import upip
try:
try:
import umqtt.simple
print('umqtt.simple module exists')
except:
upip.install('python-umqtt.simple')
print('umqtt.simple module installed')
except:
print('unable to find or install umqtt.simple module')
return False
try:
try:
from umqtt.robust import MQTTClient
print('umqtt.robust module exists')
except:
upip.install('python-umqtt.robust')
print('umqtt.robust module installed')
except:
print('unable to find or install umqtt.robust module')
return False
return True
def connect():
from umqtt.robust import MQTTClient
c = MQTTClient(ID, mqttHost, ssl=mqttSSL,
user=mqttUser, password=mqttPass)
c.DEBUG = True
    # Register the last will before connect() so it is included in the CONNECT packet;
    # setting it after connecting has no effect for the current session.
    c.set_last_will('{}/{}/status'.format(mqttTopicRoot, ID), 'offline', retain=True)
    if not c.connect():
        print('connected, will publish to {}/{}/#'.format(mqttTopicRoot, ID))
        c.publish('{}/{}/status'.format(mqttTopicRoot, ID), 'connected', retain=True)
        return c
return None
def publish(client, temperature, pressure, humidity):
client.publish('{}/{}/t'.format(mqttTopicRoot, ID), '{:.2f}C'.format(temperature), retain=True)
client.publish('{}/{}/p'.format(mqttTopicRoot, ID), '{:.2f}hPa'.format(pressure), retain=True)
client.publish('{}/{}/h'.format(mqttTopicRoot, ID), '{:.2f}%'.format(humidity), retain=True)
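# A minimal usage sketch (kept as a comment; the literal readings are illustrative
# assumptions and would normally come from a sensor driver):
#
#   if check_requirements():
#       client = connect()
#       if client is not None:
#           publish(client, 21.5, 1013.25, 40.0)  # temperature [C], pressure [hPa], humidity [%]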
|
py | b40fd281bbeef93fcf1252ac7db0a3b0df7a39a4 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects.mysql import DATE, NVARCHAR, TEXT, VARCHAR
from superset.db_engine_specs.mysql import MySQLEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.utils.core import GenericDataType
from tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestMySQLEngineSpecsDbEngineSpec(TestDbEngineSpec):
@unittest.skipUnless(
TestDbEngineSpec.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_get_datatype_mysql(self):
"""Tests related to datatype mapping for MySQL"""
self.assertEqual("TINY", MySQLEngineSpec.get_datatype(1))
self.assertEqual("VARCHAR", MySQLEngineSpec.get_datatype(15))
def test_convert_dttm(self):
dttm = self.get_dttm()
self.assertEqual(
MySQLEngineSpec.convert_dttm("DATE", dttm),
"STR_TO_DATE('2019-01-02', '%Y-%m-%d')",
)
self.assertEqual(
MySQLEngineSpec.convert_dttm("DATETIME", dttm),
"STR_TO_DATE('2019-01-02 03:04:05.678900', '%Y-%m-%d %H:%i:%s.%f')",
)
def test_column_datatype_to_string(self):
test_cases = (
(DATE(), "DATE"),
(VARCHAR(length=255), "VARCHAR(255)"),
(
VARCHAR(length=255, charset="latin1", collation="utf8mb4_general_ci"),
"VARCHAR(255)",
),
(NVARCHAR(length=128), "NATIONAL VARCHAR(128)"),
(TEXT(), "TEXT"),
)
for original, expected in test_cases:
actual = MySQLEngineSpec.column_datatype_to_string(
original, mysql.dialect()
)
self.assertEqual(actual, expected)
def test_is_db_column_type_match(self):
type_expectations = (
# Numeric
("TINYINT", GenericDataType.NUMERIC),
("SMALLINT", GenericDataType.NUMERIC),
("MEDIUMINT", GenericDataType.NUMERIC),
("INT", GenericDataType.NUMERIC),
("BIGINT", GenericDataType.NUMERIC),
("DECIMAL", GenericDataType.NUMERIC),
("FLOAT", GenericDataType.NUMERIC),
("DOUBLE", GenericDataType.NUMERIC),
("BIT", GenericDataType.NUMERIC),
# String
("CHAR", GenericDataType.STRING),
("VARCHAR", GenericDataType.STRING),
("TINYTEXT", GenericDataType.STRING),
("MEDIUMTEXT", GenericDataType.STRING),
("LONGTEXT", GenericDataType.STRING),
# Temporal
("DATE", GenericDataType.TEMPORAL),
("DATETIME", GenericDataType.TEMPORAL),
("TIMESTAMP", GenericDataType.TEMPORAL),
("TIME", GenericDataType.TEMPORAL),
)
for type_str, col_type in type_expectations:
column_spec = MySQLEngineSpec.get_column_spec(type_str)
assert column_spec.generic_type == col_type
def test_extract_error_message(self):
from MySQLdb._exceptions import OperationalError
message = "Unknown table 'BIRTH_NAMES1' in information_schema"
exception = OperationalError(message)
extracted_message = MySQLEngineSpec._extract_error_message(exception)
assert extracted_message == message
exception = OperationalError(123, message)
extracted_message = MySQLEngineSpec._extract_error_message(exception)
assert extracted_message == message
def test_extract_errors(self):
"""
Test that custom error messages are extracted correctly.
"""
msg = "mysql: Access denied for user 'test'@'testuser.com'. "
result = MySQLEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR,
message='Either the username "test" or the password is incorrect.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "MySQL",
"issue_codes": [
{
"code": 1014,
"message": "Issue 1014 - Either the"
" username or the password is wrong.",
},
{
"code": 1015,
"message": "Issue 1015 - Either the database is "
"spelled incorrectly or does not exist.",
},
],
},
)
]
msg = "mysql: Unknown MySQL server host 'badhostname.com'. "
result = MySQLEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
message='Unknown MySQL server host "badhostname.com".',
level=ErrorLevel.ERROR,
extra={
"engine_name": "MySQL",
"issue_codes": [
{
"code": 1007,
"message": "Issue 1007 - The hostname"
" provided can't be resolved.",
}
],
},
)
]
msg = "mysql: Can't connect to MySQL server on 'badconnection.com'."
result = MySQLEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR,
message='The host "badconnection.com" might be '
"down and can't be reached.",
level=ErrorLevel.ERROR,
extra={
"engine_name": "MySQL",
"issue_codes": [
{
"code": 1007,
"message": "Issue 1007 - The hostname provided"
" can't be resolved.",
}
],
},
)
]
msg = "mysql: Can't connect to MySQL server on '93.184.216.34'."
result = MySQLEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR,
message='The host "93.184.216.34" might be down and can\'t be reached.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "MySQL",
"issue_codes": [
{
"code": 10007,
"message": "Issue 1007 - The hostname provided "
"can't be resolved.",
}
],
},
)
]
msg = "mysql: Unknown database 'badDB'."
result = MySQLEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message='Unable to connect to database "badDB".',
error_type=SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "MySQL",
"issue_codes": [
{
"code": 1015,
"message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.",
}
],
},
)
]
|
py | b40fd3aeb4770b687a9dc21ab8bb019cd5bc8e61 | # (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import unittest
from mock import Mock, patch
from swiftlm.systems import ntp
from swiftlm.utils.values import Severity
from swiftlm.utils.utility import CommandResult
from swiftlm.utils.metricdata import MetricData, CheckFailure
class TestNtp(unittest.TestCase):
def p(self, name, mock):
p = patch(name, mock)
p.start()
self.addCleanup(p.stop)
def setUp(self):
self.p('swiftlm.systems.ntp.BASE_RESULT.dimensions', {})
self.p('swiftlm.utils.metricdata.get_base_dimensions', lambda *a: {})
self.p('swiftlm.utils.metricdata.timestamp', lambda *a: 123456)
def test_status_ok(self):
mock_command = Mock()
mock_command.return_value = CommandResult(0, '')
with patch('swiftlm.systems.ntp.run_cmd', mock_command):
with patch('swiftlm.systems.ntp.check_details', lambda: []):
actual = ntp.main()
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), 1)
r = actual[0]
self.assertIsInstance(r, MetricData)
expected = MetricData.single(ntp.__name__, Severity.ok,
ntp.BASE_RESULT.messages['ok'])
self.assertEqual(r, expected)
def test_status_fail(self):
mock_command = Mock()
mock_command.return_value = CommandResult(1, 'error')
with patch('swiftlm.systems.ntp.run_cmd', mock_command):
with patch('swiftlm.systems.ntp.check_details', lambda: []):
actual = ntp.main()
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), 1)
r = actual[0]
self.assertIsInstance(r, MetricData)
expected = MetricData.single(ntp.__name__, Severity.fail,
ntp.BASE_RESULT.messages['fail'],
{'error': 'error'})
self.assertEqual(r, expected)
def test_details_ok(self):
mock_command = Mock()
mock_command.return_value = CommandResult(0, 'stratum=1,offset=2,')
with patch('swiftlm.systems.ntp.run_cmd', mock_command):
with patch('swiftlm.systems.ntp.check_status', lambda: []):
actual = ntp.main()
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), 2)
actual = [a.metric() for a in actual]
expected = [
MetricData.single(ntp.__name__+'.stratum', '1', ''),
MetricData.single(ntp.__name__+'.offset', '2', '')
]
for e in expected:
self.assertIn(e.metric(), actual)
def test_details_fail(self):
mock_command = Mock()
mock_command.return_value = CommandResult(0, 'stratum=1,')
with patch('swiftlm.systems.ntp.run_cmd', mock_command):
with patch('swiftlm.systems.ntp.check_status', lambda: []):
actual = ntp.main()
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), 2)
actual = [a.metric() for a in actual]
failed = CheckFailure.child()
failed.value = Severity.fail
failed['check'] = ntp.__name__ + '.offset'
failed['error'] = 'Output does not contain "offset"'
expected = [
failed,
MetricData.single(ntp.__name__+'.stratum', '1', ''),
]
for e in expected:
self.assertIn(e.metric(), actual)
def test_main(self):
with patch('swiftlm.systems.ntp.check_status', lambda: ['a']):
with patch('swiftlm.systems.ntp.check_details', lambda: ['b']):
actual = ntp.main()
self.assertListEqual(['a', 'b'], actual)
|
py | b40fd4aaa774af819aeb2fc22ec69e4d1eeb6916 | '''CITI file IO'''
import pkg_resources
from .record import Record
__version__ = pkg_resources.get_distribution("citi").version
__all__ = (
    '__version__',
    'Record',
)
|
py | b40fd57cc5c4ef6bd056ca7a56329026900e078e | # orm/mapper.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
from collections import deque
from itertools import chain
import sys
import weakref
from . import attributes
from . import exc as orm_exc
from . import instrumentation
from . import loading
from . import properties
from . import util as orm_util
from .base import _class_to_mapper
from .base import _state_mapper
from .base import class_mapper
from .base import state_str
from .interfaces import _MappedAttribute
from .interfaces import EXT_SKIP
from .interfaces import InspectionAttr
from .interfaces import MapperProperty
from .interfaces import ORMEntityColumnsClauseRole
from .interfaces import ORMFromClauseRole
from .path_registry import PathRegistry
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..sql import base as sql_base
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..util import HasMemoized
_mapper_registries = weakref.WeakKeyDictionary()
_legacy_registry = None
def _all_registries():
with _CONFIGURE_MUTEX:
return set(_mapper_registries)
def _unconfigured_mappers():
for reg in _all_registries():
for mapper in reg._mappers_to_configure():
yield mapper
_already_compiling = False
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE")
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(
ORMFromClauseRole,
ORMEntityColumnsClauseRole,
sql_base.MemoizedHasCacheKey,
InspectionAttr,
):
"""Define the correlation of class attributes to database table
columns.
The :class:`_orm.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`_orm.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`_orm.Mapper` which maintains it can be acquired
using the :func:`_sa.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_dispose_called = False
_ready_for_configure = False
@util.deprecated_params(
non_primary=(
"1.3",
"The :paramref:`.mapper.non_primary` parameter is deprecated, "
"and will be removed in a future release. The functionality "
"of non primary mappers is now better suited using the "
":class:`.AliasedClass` construct, which can also be used "
"as the target of a :func:`_orm.relationship` in 1.3.",
),
)
def __init__(
self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
polymorphic_load=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
passive_deletes=False,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
r"""Direct constructor for a new :class:`_orm.Mapper` object.
The :func:`_orm.mapper` function is normally invoked through the
use of the :class:`_orm.registry` object through either the
:ref:`Declarative <orm_declarative_mapping>` or
:ref:`Imperative <orm_imperative_mapping>` mapping styles.
.. versionchanged:: 1.4 The :func:`_orm.mapper` function should not
be called directly for classical mapping; for a classical mapping
configuration, use the :meth:`_orm.registry.map_imperatively`
method. The :func:`_orm.mapper` function may become private in a
future release.
Parameters documented below may be passed to either the
:meth:`_orm.registry.map_imperatively` method, or may be passed in the
``__mapper_args__`` declarative class attribute described at
:ref:`orm_declarative_mapper_options`.
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`_schema.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`_schema.Table`
produced as a result of the ``__tablename__``
and :class:`_schema.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`_query.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`_schema.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
of one more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
inserted or updated, which note can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. seealso::
:ref:`orm_server_defaults`
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding
:class:`_orm.Mapper`
of one indicating a superclass to which this :class:`_orm.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`_schema.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphan object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
:param non_primary: Specify that this :class:`_orm.Mapper`
is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`_orm.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
.. seealso::
:ref:`relationship_aliased_class` - the new pattern that removes
the need for the :paramref:`_orm.Mapper.non_primary` flag.
:param passive_deletes: Indicates DELETE behavior of foreign key
columns when a joined-table inheritance entity is being deleted.
Defaults to ``False`` for a base mapper; for an inheriting mapper,
defaults to ``False`` unless the value is set to ``True``
on the superclass mapper.
When ``True``, it is assumed that ON DELETE CASCADE is configured
on the foreign key relationships that link this mapper's table
to its superclass table, so that when the unit of work attempts
to delete the entity, it need only emit a DELETE statement for the
superclass table, and not this table.
When ``False``, a DELETE statement is emitted for this mapper's
table individually. If the primary key attributes local to this
table are unloaded, then a SELECT must be emitted in order to
validate these attributes; note that the primary key columns
of a joined-table subclass are not part of the "primary key" of
the object as a whole.
Note that a value of ``True`` is **always** forced onto the
subclass mappers; that is, it's not possible for a superclass
to specify passive_deletes without this taking effect for
all subclass mappers.
.. versionadded:: 1.1
.. seealso::
:ref:`passive_deletes` - description of similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_updates` - supporting ON UPDATE
CASCADE for joined-table inheritance mappers
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_deletes` - supporting ON DELETE
CASCADE for joined-table inheritance mappers
:param polymorphic_load: Specifies "polymorphic loading" behavior
for a subclass in an inheritance hierarchy (joined and single
table inheritance only). Valid values are:
* "'inline'" - specifies this class should be part of the
"with_polymorphic" mappers, e.g. its columns will be included
in a SELECT query against the base.
* "'selectin'" - specifies that when instances of this class
are loaded, an additional SELECT will be emitted to retrieve
the columns specific to this subclass. The SELECT uses
IN to fetch multiple subclasses at once.
.. versionadded:: 1.2
.. seealso::
:ref:`with_polymorphic_mapper_config`
:ref:`polymorphic_selectin`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`_schema.Column` object that's
present in the mapped :class:`_schema.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`_schema.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that
:class:`_schema.Column`
objects present in
the mapped :class:`_schema.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`_schema.Column`
objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`_schema.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
detect if an UPDATE statement does not match the last known
version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
"""
self.class_ = util.assert_arg_type(class_, type, "class_")
self._sort_key = "%s.%s" % (
self.class_.__module__,
self.class_.__name__,
)
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
if local_table is not None:
self.local_table = coercions.expect(roles.StrictFromClauseRole, local_table)
else:
self.local_table = None
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = (
coercions.expect(
roles.ColumnArgumentOrKeyRole,
polymorphic_on,
argname="polymorphic_on",
)
if polymorphic_on is not None
else None
)
self._dependency_processors = []
self.validators = util.EMPTY_DICT
self.passive_updates = passive_updates
self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
self.polymorphic_load = polymorphic_load
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
with _CONFIGURE_MUTEX:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_class_instrumentation()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
self.registry._flag_new_mapper(self)
self._log("constructed")
self._expire_memoizations()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
represents_outer_join = False
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
def _gen_cache_key(self, anon_map, bindparams):
return (self,)
@property
def entity(self):
r"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`_expression.Selectable` which this :class:`_orm.Mapper`
manages.
Typically is an instance of :class:`_schema.Table` or
:class:`_expression.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`_orm.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`_orm.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`_orm.Mapper.persist_selectable`.
"""
persist_selectable = None
"""The :class:`_expression.Selectable` to which this :class:`_orm.Mapper`
is mapped.
Typically an instance of :class:`_schema.Table`,
:class:`_expression.Join`, or :class:`_expression.Alias`.
The :attr:`_orm.Mapper.persist_selectable` is separate from
:attr:`_orm.Mapper.selectable` in that the former represents columns
that are mapped on this class or its superclasses, whereas the
latter may be a "polymorphic" selectable that contains additional columns
which are in fact mapped on subclasses only.
"persist selectable" is the "thing the mapper writes to" and
"selectable" is the "thing the mapper selects from".
:attr:`_orm.Mapper.persist_selectable` is also separate from
:attr:`_orm.Mapper.local_table`, which represents the set of columns that
are locally mapped on this class directly.
.. seealso::
:attr:`_orm.Mapper.selectable`.
:attr:`_orm.Mapper.local_table`.
"""
inherits = None
"""References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = False
"""Represent ``True`` if this :class:`_orm.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`_schema.Table` objects
which this :class:`_orm.Mapper` is aware of.
If the mapper is mapped to a :class:`_expression.Join`, or an
:class:`_expression.Alias`
representing a :class:`_expression.Select`, the individual
:class:`_schema.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`_schema.Column`
objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`_orm.Mapper`.
This list is against the selectable in
:attr:`_orm.Mapper.persist_selectable`.
In the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a
:class:`_expression.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`_expression.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`_orm.Mapper`
features a ``primary_key`` argument that can override what the
:class:`_orm.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`_orm.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`_orm.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a single table
inheritance mapper.
:attr:`_orm.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`_schema.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`_orm.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`_schema.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`_orm.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`_orm.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`_orm.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`_orm.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`_orm.Mapper`. In an inheritance scenario, it references
the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`_schema.Column` or other scalar expression
objects maintained by this :class:`_orm.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`_schema.Table` object,
except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`_schema.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`_orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
"""A synonym for :attr:`_orm.Mapper.columns`."""
@property
@util.deprecated("1.3", "Use .persist_selectable")
def mapped_table(self):
return self.persist_selectable
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inheriting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'"
% (self.class_.__name__, self.inherits.class_.__name__)
)
self.dispatch._update(self.inherits.dispatch)
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" % (np, self.class_.__name__, np)
)
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.persist_selectable = self.inherits.persist_selectable
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.persist_selectable = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table, self.local_table
)
self.persist_selectable = sql.join(
self.inherits.persist_selectable,
self.local_table,
self.inherit_condition,
)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.persist_selectable.onclause,
consider_as_foreign_keys=fks,
)
else:
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif (
self.inherits.version_id_col is not None
and self.version_id_col is not self.inherits.version_id_col
):
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning."
% (
self.version_id_col.description,
self.inherits.version_id_col.description,
)
)
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self.passive_deletes = self.inherits.passive_deletes or self.passive_deletes
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity."
% (
self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self,
self.polymorphic_identity,
)
)
self.polymorphic_map[self.polymorphic_identity] = self
if self.polymorphic_load and self.concrete:
raise sa_exc.ArgumentError(
"polymorphic_load is not currently supported "
"with concrete table inheritance"
)
if self.polymorphic_load == "inline":
self.inherits._add_with_polymorphic_subclass(self)
elif self.polymorphic_load == "selectin":
pass
elif self.polymorphic_load is not None:
raise sa_exc.ArgumentError(
"unknown argument for polymorphic_load: %r" % self.polymorphic_load
)
else:
self._all_tables = set()
self.base_mapper = self
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.persist_selectable is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a persist_selectable specified." % self
)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == "*":
self.with_polymorphic = ("*", None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(with_polymorphic[0], util.string_types + (tuple, list)):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if self.with_polymorphic and self.with_polymorphic[1] is not None:
self.with_polymorphic = (
self.with_polymorphic[0],
coercions.expect(
roles.StrictFromClauseRole,
self.with_polymorphic[1],
allow_select=True,
),
)
if self.configured:
self._expire_memoizations()
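# Usage sketch (illustrative only): the value handled above usually originates
# from the ``with_polymorphic`` mapper argument, e.g. in a hypothetical
# declarative base class of a hierarchy:
#
#     class Employee(Base):
#         __tablename__ = "employee"
#         id = Column(Integer, primary_key=True)
#         type = Column(String)
#         __mapper_args__ = {
#             "polymorphic_on": type,
#             "polymorphic_identity": "employee",
#             "with_polymorphic": "*",   # include all subclass tables by default
#         }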
def _add_with_polymorphic_subclass(self, mapper):
subcl = mapper.class_
if self.with_polymorphic is None:
self._set_with_polymorphic((subcl,))
elif self.with_polymorphic[0] != "*":
self._set_with_polymorphic(
(self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
)
def _set_concrete_base(self, mapper):
"""Set the given :class:`_orm.Mapper` as the 'inherits' for this
:class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class and entity name.
Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
# we expect that declarative has applied the class manager
# already and set up a registry. if this is None,
# we will emit a deprecation warning below when we also see that
# it has no registry.
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_
)
self.class_manager = manager
self.registry = manager.registry
self._identity_class = manager.mapper._identity_class
manager.registry._add_non_primary_mapper(self)
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. " % self.class_
)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
self.dispatch.instrument_class(self, self.class_)
# this invokes the class_instrument event and sets up
# the __init__ method. documented behavior is that this must
# occur after the instrument_class event above.
# yes two events with the same two words reversed and different APIs.
# :(
manager = instrumentation.register_class(
self.class_,
mapper=self,
expired_attribute_loader=util.partial(loading.load_scalar_attributes, self),
# finalize flag means instrument the __init__ method
# and call the class_instrument event
finalize=True,
)
if not manager.registry:
util.warn_deprecated_20(
"Calling the mapper() function directly outside of a "
"declarative registry is deprecated."
" Please use the sqlalchemy.orm.registry.map_imperatively() "
"function for a classical mapping."
)
assert _legacy_registry is not None
_legacy_registry._add_manager(manager)
self.class_manager = manager
self.registry = manager.registry
# The remaining members can be added by any mapper,
# regardless of whether an entity name is in use.
if manager.mapper is None:
return
event.listen(manager, "init", _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if key == "__init__" and hasattr(method, "_sa_original_init"):
method = method._sa_original_init
if hasattr(method, "__func__"):
method = method.__func__
if callable(method):
if hasattr(method, "__sa_reconstructor__"):
self._reconstructor = method
event.listen(manager, "load", _event_on_load, raw=True)
elif hasattr(method, "__sa_validators__"):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
if name in self.validators:
raise sa_exc.InvalidRequestError(
"A validation function for mapped "
"attribute %r on mapper %s already exists."
% (name, self)
)
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
def _set_dispose_flags(self):
self.configured = True
self._ready_for_configure = True
self._dispose_called = True
self.__dict__.pop("_configure_failed", None)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.persist_selectable)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(
chain(*[col.proxy_set for col in self._columntoproperty])
)
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.persist_selectable])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = util.ordered_column_set(
t.primary_key
).intersection(pk_cols)
self._cols_by_table[t] = util.ordered_column_set(t.c).intersection(all_cols)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif (
self.persist_selectable not in self._pks_by_table
or len(self._pks_by_table[self.persist_selectable]) == 0
):
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
elif self.local_table not in self._pks_by_table and isinstance(
self.local_table, schema.Table
):
util.warn(
"Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description
)
if self.inherits and not self.concrete and not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or persist_selectable pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[
self.persist_selectable.corresponding_column(c)
for c in self._primary_key_argument
],
ignore_nonexistent_tables=True,
)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.persist_selectable],
ignore_nonexistent_tables=True,
)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props
and (not hasattr(col, "table") or col.table not in self._cols_by_table)
)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
# TODO: technically this should be a DedupeColumnCollection
# however DCC needs changes and more tests to fully cover
# storing columns under a separate key name
self.columns = self.c = sql_base.ColumnCollection()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.persist_selectable.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or "") + column.key
if self._should_exclude(
column.key,
column_key,
local=self.local_table.c.contains_column(column),
column=column,
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key, column, init=False, setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on
),
replace_context=err,
)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(self.polymorphic_on, properties.ColumnProperty):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
prop = self.polymorphic_on
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's persist_selectable
col = self.persist_selectable.corresponding_column(self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either persist_selectable or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None
or self.with_polymorphic[1].corresponding_column(col) is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly" % col.description
)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as is the case when using
# polymorphic_union().
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, "key", None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" % col.key
)
else:
self.polymorphic_on = col = col.label("_sa_polymorphic_on")
key = col.key
prop = properties.ColumnProperty(col, _instrument=instrument)
self._configure_property(key, prop, init=init, setparent=True)
# the actual polymorphic_on should be the first public-facing
# column in the property
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.persist_selectable is mapper.persist_selectable:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = (
self.persist_selectable
).corresponding_column(mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used so is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = (
mapper._set_polymorphic_identity
)
self._validate_polymorphic_identity = (
mapper._validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state,
dict_,
state.manager.mapper.polymorphic_identity,
None,
)
def _validate_polymorphic_identity(mapper, state, dict_):
if (
polymorphic_key in dict_
and dict_[polymorphic_key]
not in mapper._acceptable_polymorphic_identities
):
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key]),
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = _validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@HasMemoized.memoized_attribute
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@HasMemoized.memoized_attribute
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.persist_selectable is self.persist_selectable:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@HasMemoized.memoized_attribute
def _prop_set(self):
return frozenset(self._props.values())
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _adapt_inherited_property(self, key, prop, init):
descriptor_props = util.preloaded.orm_descriptor_props
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
# determine if the class implements this attribute; if not,
# or if it is implemented by the attribute that is handling the
# given superclass-mapped property, then we need to report that we
# can't use this at the instance level since we are a concrete
# mapper and we don't map this. don't trip user-defined
# descriptors that might have side effects when invoked.
implementing_attribute = self.class_manager._get_class_attr_mro(key, prop)
if implementing_attribute is prop or (
isinstance(implementing_attribute, attributes.InstrumentedAttribute)
and implementing_attribute._parententity is prop.parent
):
self._configure_property(
key,
descriptor_props.ConcreteInheritedProperty(),
init=init,
setparent=True,
)
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _configure_property(self, key, prop, init=True, setparent=True):
descriptor_props = util.preloaded.orm_descriptor_props
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.persist_selectable.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.persist_selectable._refresh_for_new_column(col)
col = self.persist_selectable.corresponding_column(
prop.columns[0]
)
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, "_readonly_props") and (
not hasattr(col, "table") or col.table not in self._cols_by_table
):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if (
hasattr(self, "_cols_by_table")
and col.table in self._cols_by_table
and col not in self._cols_by_table[col.table]
):
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, "_is_polymorphic_discriminator"):
prop._is_polymorphic_discriminator = (
col is self.polymorphic_on or prop.columns[0] is self.polymorphic_on
)
if isinstance(col, expression.Label):
# new in 1.4, get column property against expressions
# to be addressable in subqueries
col.key = col._key_label = key
self.columns.add(col, key)
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and getattr(
self._props[key], "_mapped_by_synonym", False
):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if (
key in self._props
and not isinstance(prop, properties.ColumnProperty)
and not isinstance(
self._props[key],
(
properties.ColumnProperty,
descriptor_props.ConcreteInheritedProperty,
),
)
):
util.warn(
"Property %s on %s being replaced with new "
"property %s; the old property will be discarded"
% (self._props[key], self, prop)
)
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProperty` given a
:class:`_schema.Column` object."""
descriptor_props = util.preloaded.orm_descriptor_props
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
assert isinstance(column, expression.ColumnElement)
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
(
not self._inherits_equated_pairs
or (prop.columns[0], column) not in self._inherits_equated_pairs
)
and not prop.columns[0].shares_lineage(column)
and prop.columns[0] is not self.version_id_col
and column is not self.version_id_col
):
warn_only = prop.parent is not self
msg = (
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key)
)
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log(
"inserting column to existing list "
"in properties.ColumnProperty %s" % (key)
)
return prop
elif prop is None or isinstance(
prop, descriptor_props.ConcreteInheritedProperty
):
mapped_column = []
for c in columns:
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.persist_selectable._refresh_for_new_column(mc)
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c)
)
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." % (key, self, column.key, prop)
)
def _check_configure(self):
if self.registry._new_mappers:
_configure_registries({self.registry}, cascade=True)
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
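# Usage sketch (illustrative only; ``User`` / ``Address`` are hypothetical
# mapped classes and the property names are hypothetical as well):
#
#     from sqlalchemy import func, inspect
#     from sqlalchemy.orm import column_property, relationship
#
#     user_mapper = inspect(User)
#     user_mapper.add_property("addresses", relationship(Address))
#     user_mapper.add_property(
#         "email_lower", column_property(func.lower(User.__table__.c.email))
#     )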
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
mapper._reset_memoizations()
@property
def _log_desc(self):
return (
"("
+ self.class_.__name__
+ "|"
+ (
self.local_table is not None
and self.local_table.description
or str(self.local_table)
)
+ (self.non_primary and "|non-primary" or "")
+ ")"
)
def _log(self, msg, *args):
self.logger.info("%s " + msg, *((self._log_desc,) + args))
def _log_debug(self, msg, *args):
self.logger.debug("%s " + msg, *((self._log_desc,) + args))
def __repr__(self):
return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
def __str__(self):
return "mapped class %s%s->%s" % (
self.class_.__name__,
self.non_primary and " (non-primary)" or "",
self.local_table.description
if self.local_table is not None
else self.persist_selectable.description,
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity
)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key."""
if _configure_mappers:
self._check_configure()
try:
return self._props[key]
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key)
),
replace_context=err,
)
def get_property_by_column(self, column):
"""Given a :class:`_schema.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
self._check_configure()
return iter(self._props.values())
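# Usage sketch (illustrative only; ``User`` is a hypothetical mapped class):
#
#     from sqlalchemy import inspect
#
#     user_mapper = inspect(User)
#     name_prop = user_mapper.get_property("name")
#     assert user_mapper.get_property_by_column(User.__table__.c.name) is name_prop
#     keys = [prop.key for prop in user_mapper.iterate_properties]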
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == "*":
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" % (m, self)
)
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(sql_util.find_tables(selectable, include_aliases=True))
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mappers'
mapped tables.
"""
from_obj = self.persist_selectable
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used."
)
elif not m.single:
if innerjoin:
from_obj = from_obj.join(m.local_table, m.inherit_condition)
else:
from_obj = from_obj.outerjoin(m.local_table, m.inherit_condition)
return from_obj
@HasMemoized.memoized_attribute
def _single_table_criterion(self):
if self.single and self.inherits and self.polymorphic_on is not None:
return self.polymorphic_on._annotate({"parentmapper": self}).in_(
m.polymorphic_identity for m in self.self_and_descendants
)
else:
return None
@HasMemoized.memoized_attribute
def _with_polymorphic_mappers(self):
self._check_configure()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@HasMemoized.memoized_attribute
def _post_inspect(self):
"""This hook is invoked by attribute inspection.
E.g. when Query calls:
coercions.expect(roles.ColumnsClauseRole, ent, keep_inspect=True)
This allows the inspection process to run the configure-mappers hook.
"""
self._check_configure()
@HasMemoized.memoized_attribute
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.persist_selectable
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable), False
)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`_orm.Mapper` objects included in the
default "polymorphic" query.
"""
@HasMemoized.memoized_attribute
def _insert_cols_evaluating_none(self):
return dict(
(
table,
frozenset(col for col in columns if col.type.should_evaluate_none),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key
for col in columns
if not col.primary_key
and not col.server_default
and not col.default
and not col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _propkey_to_col(self):
return dict(
(
table,
dict((self._columntoproperty[col].key, col) for col in columns),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _pk_keys_by_table(self):
return dict(
(table, frozenset([col.key for col in pks]))
for table, pks in self._pks_by_table.items()
)
@HasMemoized.memoized_attribute
def _pk_attr_keys_by_table(self):
return dict(
(
table,
frozenset([self._columntoproperty[col].key for col in pks]),
)
for table, pks in self._pks_by_table.items()
)
@HasMemoized.memoized_attribute
def _server_default_cols(self):
return dict(
(
table,
frozenset(
[col.key for col in columns if col.server_default is not None]
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _server_default_plus_onupdate_propkeys(self):
result = set()
for table, columns in self._cols_by_table.items():
for col in columns:
if (
col.server_default is not None or col.server_onupdate is not None
) and col in self._columntoproperty:
result.add(self._columntoproperty[col].key)
return result
@HasMemoized.memoized_attribute
def _server_onupdate_default_cols(self):
return dict(
(
table,
frozenset(
[col.key for col in columns if col.server_onupdate is not None]
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_instancemethod
def __clause_element__(self):
annotations = {
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
if self.persist_selectable is not self.local_table:
# joined table inheritance, with polymorphic selectable,
# etc.
annotations["dml_table"] = self.local_table._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
return self.selectable._annotate(annotations)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
@util.memoized_property
def select_identity_token(self):
return (
expression.null()
._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
"identity_token": True,
}
)
._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
)
@property
def selectable(self):
"""The :class:`_schema.FromClause` construct this
:class:`_orm.Mapper` selects from by default.
Normally, this is equivalent to :attr:`.persist_selectable`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
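# Usage sketch (illustrative only; ``User`` is a hypothetical, plainly mapped
# class): for a simple single-table mapping the selectable is the Table
# itself, while a with_polymorphic setting may substitute a JOIN:
#
#     from sqlalchemy import inspect, select
#
#     assert inspect(User).selectable is User.__table__
#     stmt = select(User)   # SELECTs FROM inspect(User).selectable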
def _with_polymorphic_args(self, spec=None, selectable=False, innerjoin=False):
if selectable not in (None, False):
selectable = coercions.expect(
roles.StrictFromClauseRole, selectable, allow_select=True
)
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers, innerjoin)
@HasMemoized.memoized_attribute
def _polymorphic_properties(self):
return list(
self._iterate_polymorphic_properties(self._with_polymorphic_mappers)
)
@property
def _all_column_expressions(self):
poly_properties = self._polymorphic_properties
adapter = self._polymorphic_adapter
return [
adapter.columns[prop.columns[0]] if adapter else prop.columns[0]
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
and prop._renders_in_subqueries
]
def _columns_plus_keys(self, polymorphic_mappers=()):
if polymorphic_mappers:
poly_properties = self._iterate_polymorphic_properties(polymorphic_mappers)
else:
poly_properties = self._polymorphic_properties
return [
(prop.key, prop.columns[0])
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
]
@HasMemoized.memoized_attribute
def _polymorphic_adapter(self):
if self.with_polymorphic:
return sql_util.ColumnAdapter(
self.selectable, equivalents=self._equivalent_columns
)
else:
return None
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(*[list(mapper.iterate_properties) for mapper in [self] + mappers])
):
if getattr(c, "_is_polymorphic_discriminator", False) and (
self.polymorphic_on is None
or c.columns[0] is not self.polymorphic_on
):
continue
yield c
@HasMemoized.memoized_attribute
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
:class:`_orm.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`_orm.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
self._check_configure()
return util.ImmutableProperties(self._props)
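# Usage sketch (illustrative only; ``User`` is a hypothetical mapped class):
#
#     from sqlalchemy import inspect
#
#     mapper = inspect(User)
#     name_prop = mapper.attrs["name"]       # dict-style access is preferred
#     for prop in mapper.attrs:              # iteration yields MapperProperty
#         print(prop.key, type(prop).__name__)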
@HasMemoized.memoized_attribute
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
The sorting of the attributes is based on the following rules:
1. Iterate through the class and its superclasses in order from
subclass to superclass (i.e. iterate through ``cls.__mro__``)
2. For each class, yield the attributes in the order in which they
appear in ``__dict__``, with the exception of those in step
3 below. In Python 3.6 and above this ordering will be the
same as that of the class' construction, with the exception
of attributes that were added after the fact by the application
or the mapper.
3. If a certain attribute key is also in the superclass ``__dict__``,
then it's included in the iteration for that class, and not the
class in which it first appeared.
The above process produces an ordering that is deterministic in terms
of the order in which attributes were assigned to the class.
.. versionchanged:: 1.3.19 ensured deterministic ordering for
:meth:`_orm.Mapper.all_orm_descriptors`.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`_orm.Mapper.attrs`.
.. warning::
The :attr:`_orm.Mapper.all_orm_descriptors`
accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs`
"""
return util.ImmutableProperties(dict(self.class_manager._all_sqla_attributes()))
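# Usage sketch (illustrative only; ``User`` is a hypothetical mapped class,
# possibly carrying hybrid_property / association_proxy attributes):
#
#     from sqlalchemy import inspect
#
#     for key, descriptor in inspect(User).all_orm_descriptors.items():
#         print(key, descriptor.extension_type)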
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
descriptor_props = util.preloaded.orm_descriptor_props
return self._filter_properties(descriptor_props.SynonymProperty)
@property
def entity_namespace(self):
return self.class_
@HasMemoized.memoized_attribute
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@util.preload_module("sqlalchemy.orm.relationships")
@HasMemoized.memoized_attribute
def relationships(self):
"""A namespace of all :class:`.RelationshipProperty` properties
maintained by this :class:`_orm.Mapper`.
.. warning::
the :attr:`_orm.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_relationships.RelationshipProperty
)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_descriptor_props.CompositeProperty
)
def _filter_properties(self, type_):
self._check_configure()
return util.ImmutableProperties(
util.OrderedDict(
(k, v) for k, v in self._props.items() if isinstance(v, type_)
)
)
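# Usage sketch (illustrative only; ``User`` is a hypothetical mapped class
# with a relationship named "addresses"):
#
#     from sqlalchemy import inspect
#
#     mapper = inspect(User)
#     column_keys = mapper.column_attrs.keys()
#     assert "addresses" in mapper.relationships.keys()
#     synonym_keys = mapper.synonyms.keys()
#     composite_keys = mapper.composites.keys()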
@HasMemoized.memoized_attribute
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [
(
primary_key,
sql.bindparam("pk_%d" % idx, type_=primary_key.type),
)
for idx, primary_key in enumerate(self.primary_key, 1)
]
return (
sql.and_(*[k == v for (k, v) in params]),
util.column_dict(params),
)
@HasMemoized.memoized_attribute
def _equivalent_columns(self):
"""Create a map of all equivalent columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, e.g.::
{
tablea.col1:
{tableb.col1, tablec.col1},
tablea.col2:
{tabled.col2}
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {}, {"binary": visit_binary}
)
return result
def _is_userland_descriptor(self, assigned_name, obj):
if isinstance(
obj,
(
_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement,
),
):
return False
else:
return assigned_name not in self._dataclass_fields
@HasMemoized.memoized_attribute
def _dataclass_fields(self):
return [f.name for f in util.dataclass_fields(self.class_)]
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
# ignore dataclass field default values
if local:
if self.class_.__dict__.get(
assigned_name, None
) is not None and self._is_userland_descriptor(
assigned_name, self.class_.__dict__[assigned_name]
):
return True
else:
attr = self.class_manager._get_class_attr_mro(assigned_name, None)
if attr is not None and self._is_userland_descriptor(assigned_name, attr):
return True
if (
self.include_properties is not None
and name not in self.include_properties
and (column is None or column not in self.include_properties)
):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and (
name in self.exclude_properties
or (column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def is_sibling(self, other):
"""return true if the other mapper is an inheriting sibling to this
one. common parent but different branch
"""
return (
self.base_mapper is other.base_mapper
and not self.isa(other)
and not other.isa(self)
)
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
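# Usage sketch (illustrative only; ``Employee``, ``Manager`` and ``Engineer``
# are a hypothetical inheritance hierarchy with Employee as the base):
#
#     from sqlalchemy import inspect
#
#     employee, manager, engineer = (
#         inspect(Employee), inspect(Manager), inspect(Engineer)
#     )
#     assert manager.isa(employee) and not employee.isa(manager)
#     assert manager.common_parent(engineer)
#     assert manager.is_sibling(engineer)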
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@HasMemoized.memoized_attribute
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
rk = result.keys()
for col in pk_cols:
if col not in rk:
return False
else:
return True
def identity_key_from_row(self, row, identity_token=None, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.Row` instance. The columns which are
mapped by this :class:`_orm.Mapper` should be locatable in the row,
preferably via the :class:`_schema.Column`
object directly (as is the case
when a :func:`_expression.select` construct is executed), or
via string names of the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return (
self._identity_class,
tuple(row[column] for column in pk_cols),
identity_token,
)
def identity_key_from_primary_key(self, primary_key, identity_token=None):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
"""
return self._identity_class, tuple(primary_key), identity_token
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name ``key``.
"""
state = attributes.instance_state(instance)
return self._identity_key_from_state(state, attributes.PASSIVE_OFF)
def _identity_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
dict_ = state.dict
manager = state.manager
return (
self._identity_class,
tuple(
[
manager[prop.key].impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
),
state.identity_token,
)
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
identity_key = self._identity_key_from_state(state, attributes.PASSIVE_OFF)
return identity_key[1]
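# Usage sketch (illustrative only; ``User`` is a hypothetical mapped class and
# ``some_user`` a persistent instance whose single primary key value is 5):
#
#     from sqlalchemy import inspect
#
#     mapper = inspect(User)
#     key = mapper.identity_key_from_primary_key((5,))
#     assert key == (User, (5,), None)
#     assert mapper.identity_key_from_instance(some_user) == key
#     assert mapper.primary_key_from_instance(some_user) == (5,)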
@HasMemoized.memoized_attribute
def _persistent_sortkey_fn(self):
key_fns = [col.type.sort_key_function for col in self.primary_key]
if set(key_fns).difference([None]):
def key(state):
return tuple(
key_fn(val) if key_fn is not None else val
for key_fn, val in zip(key_fns, state.key[1])
)
else:
def key(state):
return state.key[1]
return key
@HasMemoized.memoized_attribute
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@HasMemoized.memoized_attribute
def _all_pk_cols(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@HasMemoized.memoized_attribute
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@HasMemoized.memoized_attribute
def _primary_key_propkeys(self):
return {self._columntoproperty[col].key for col in self._all_pk_cols}
def _get_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF
)
def _get_committed_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get_committed_value(
state, dict_, passive=passive
)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
col_attribute_names = set(attribute_names).intersection(
state.mapper.column_attrs.keys()
)
tables = set(
chain(
*[
sql_util.find_tables(c, check_columns=True)
for key in col_attribute_names
for c in props[key].columns
]
)
)
if self.base_mapper.local_table in tables:
return None
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state,
state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if leftval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval, type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state,
state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if rightval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval, type_=binary.right.type)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table, expression.TableClause):
return None
if start and not mapper.single:
allconds.append(
visitors.cloned_traverse(
mapper.inherit_condition,
{},
{"binary": visit_binary},
)
)
except _OptGetColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in col_attribute_names:
cols.extend(props[key].columns)
return (
sql.select(*cols)
.where(cond)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
def _iterate_to_target_viawpoly(self, mapper):
if self.isa(mapper):
prev = self
for m in self.iterate_to_root():
yield m
if m is not prev and prev not in m._with_polymorphic_mappers:
break
prev = m
if m is mapper:
break
def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
if not enabled_via_opt:
# common case, takes place for all polymorphic loads
mapper = polymorphic_from
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
return m
else:
# uncommon case, selectin load options were used
enabled_via_opt = set(enabled_via_opt)
enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
for entity in enabled_via_opt.union([polymorphic_from]):
mapper = entity.mapper
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin" or m in enabled_via_opt_mappers:
return enabled_via_opt_mappers.get(m, m)
return None
@util.preload_module("sqlalchemy.ext.baked", "sqlalchemy.orm.strategy_options")
def _subclass_load_via_in(self, entity):
"""Assemble a BakedQuery that can load the columns local to
this subclass as a SELECT with IN.
"""
strategy_options = util.preloaded.orm_strategy_options
baked = util.preloaded.ext_baked
assert self.inherits
polymorphic_prop = self._columntoproperty[self.polymorphic_on]
keep_props = set([polymorphic_prop] + self._identity_key_props)
disable_opt = strategy_options.Load(entity)
enable_opt = strategy_options.Load(entity)
for prop in self.attrs:
if prop.parent is self or prop in keep_props:
# "enable" options, to turn on the properties that we want to
# load by default (subject to options from the query)
enable_opt.set_generic_strategy((prop.key,), dict(prop.strategy_key))
else:
# "disable" options, to turn off the properties from the
# superclass that we *don't* want to load, applied after
# the options from the query to override them
disable_opt.set_generic_strategy((prop.key,), {"do_nothing": True})
primary_key = [
sql_util._deep_annotate(pk, {"_orm_adapt": True}) for pk in self.primary_key
]
if len(primary_key) > 1:
in_expr = sql.tuple_(*primary_key)
else:
in_expr = primary_key[0]
if entity.is_aliased_class:
assert entity.mapper is self
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(entity).select_entity_from(
entity.selectable
),
(self,),
)
q.spoil()
else:
q = baked.BakedQuery(
self._compiled_cache,
lambda session: session.query(self),
(self,),
)
q += lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*primary_key)
return q, enable_opt, disable_opt
@HasMemoized.memoized_attribute
def _subclass_load_via_in_mapper(self):
return self._subclass_load_via_in(self)
def cascade_iterator(self, type_, state, halt_on=None):
r"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type\_:
The name of the cascade rule (e.g. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
The lead InstanceState. Child items will be processed per
the relationships defined for this object's mapper.
:return: the method yields ``(object, mapper, state, dict)`` tuples for
each visited object.
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states = set()
prp, mpp = object(), object()
assert state.mapper.isa(self)
visitables = deque(
[(deque(state.mapper._props.values()), prp, state, state.dict)]
)
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(
prop.cascade_iterator(
type_,
parent_state,
parent_dict,
visited_states,
halt_on,
)
)
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
(
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
) = iterator.popleft()
yield (
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
)
visitables.append(
(
deque(instance_mapper._props.values()),
prp,
corresponding_state,
corresponding_dict,
)
)
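# Illustrative usage sketch (assumed, already-persisted instance
# ``some_user``): driving cascade_iterator() directly; the 4-tuple unpacking
# mirrors the yield above.
#
#     from sqlalchemy import inspect
#
#     state = inspect(some_user)
#     for obj, obj_mapper, obj_state, obj_dict in state.mapper.cascade_iterator(
#         "save-update", state
#     ):
#         print(obj_mapper.class_.__name__, obj)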
@HasMemoized.memoized_attribute
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@HasMemoized.memoized_attribute
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend(
[(super_table, table) for super_table in super_.tables]
)
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if (
parent is not None
and dep is not None
and dep is not parent
and dep.inherit_condition is not None
):
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util._find_columns(parent.inherit_condition))
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(
table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies,
)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and cols.intersection(
util.reduce(
set.union,
[l.proxy_set for l, r in m._inherits_equated_pairs],
)
):
result[table].append((m, m._inherits_equated_pairs))
return result
class _OptGetColumnsNotAvailable(Exception):
pass
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far across all :class:`_orm.registry`
collections.
The configure step is used to reconcile and initialize the
:func:`_orm.relationship` linkages between mapped classes, as well as to
invoke configuration events such as the
:meth:`_orm.MapperEvents.before_configured` and
:meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
extensions or user-defined extension hooks.
Mapper configuration is normally invoked automatically, the first time
mappings from a particular :class:`_orm.registry` are used, as well as
whenever mappings are used and additional not-yet-configured mappers have
been constructed. The automatic configuration process, however, is local only
to the :class:`_orm.registry` involving the target mapper and any related
:class:`_orm.registry` objects which it may depend on; this is
equivalent to invoking the :meth:`_orm.registry.configure` method
on a particular :class:`_orm.registry`.
By contrast, the :func:`_orm.configure_mappers` function will invoke the
configuration process on all :class:`_orm.registry` objects that
exist in memory, and may be useful for scenarios where many individual
:class:`_orm.registry` objects that are nonetheless interrelated are
in use.
.. versionchanged:: 1.4
As of SQLAlchemy 1.4.0b2, this function works on a
per-:class:`_orm.registry` basis, locating all :class:`_orm.registry`
objects present and invoking the :meth:`_orm.registry.configure` method
on each. The :meth:`_orm.registry.configure` method may be preferred to
limit the configuration of mappers to those local to a particular
:class:`_orm.registry` and/or declarative base class.
Points at which automatic configuration is invoked include when a mapped
class is instantiated into an instance, as well as when ORM queries
are emitted using :meth:`.Session.query` or :meth:`_orm.Session.execute`
with an ORM-enabled statement.
The mapper configure process, whether invoked by
:func:`_orm.configure_mappers` or from :meth:`_orm.registry.configure`,
provides several event hooks that can be used to augment the mapper
configuration step. These hooks include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` or :meth:`_orm.registry.configure` does any
work; this can be used to establish additional options, properties, or
related mappings before the operation proceeds.
* :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`_orm.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` or :meth:`_orm.registry.configure` is
complete; at this stage, all :class:`_orm.Mapper` objects that fall
within the scope of the configuration operation will be fully configured.
Note that the calling application may still have other mappings that
haven't been produced yet, such as if they are in modules as yet
unimported, and may also have mappings that are still to be configured,
if they are in other :class:`_orm.registry` collections not part of the
current scope of configuration.
"""
_configure_registries(_all_registries(), cascade=True)
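# Illustrative usage sketch (assumed application-side code): explicitly
# running the configure step and hooking the "before_configured" /
# "after_configured" events described above.
def _example_configure_mappers_usage():
    from sqlalchemy import event
    from sqlalchemy.orm import Mapper, configure_mappers

    @event.listens_for(Mapper, "before_configured", once=True)
    def _before_configured():
        print("mapper configuration is about to run")

    @event.listens_for(Mapper, "after_configured", once=True)
    def _after_configured():
        print("all mappers in scope are now configured")

    configure_mappers()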
def _configure_registries(registries, cascade):
for reg in registries:
if reg._new_mappers:
break
else:
return
with _CONFIGURE_MUTEX:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
for reg in registries:
if reg._new_mappers:
break
else:
return
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
_do_configure_registries(registries, cascade)
finally:
_already_compiling = False
Mapper.dispatch._for_class(Mapper).after_configured()
@util.preload_module("sqlalchemy.orm.decl_api")
def _do_configure_registries(registries, cascade):
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependencies(registries):
has_skip = False
for mapper in reg._mappers_to_configure():
run_configure = None
for fn in mapper.dispatch.before_mapper_configured:
run_configure = fn(mapper, mapper.class_)
if run_configure is EXT_SKIP:
has_skip = True
break
if run_configure is EXT_SKIP:
continue
if getattr(mapper, "_configure_failed", False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Triggering mapper: '%s'. "
"Original exception was: %s" % (mapper, mapper._configure_failed)
)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(mapper, mapper.class_)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, "_configure_failed"):
mapper._configure_failed = exc
raise
if not has_skip:
reg._new_mappers = False
if not cascade and reg._dependencies.difference(orig):
raise sa_exc.InvalidRequestError(
"configure was called with cascade=False but "
"additional registries remain"
)
@util.preload_module("sqlalchemy.orm.decl_api")
def _dispose_registries(registries, cascade):
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependents(registries):
if not cascade and reg._dependents.difference(orig):
raise sa_exc.InvalidRequestError(
"Registry has dependent registries that are not disposed; "
"pass cascade=True to clear these also"
)
while reg._managers:
try:
manager, _ = reg._managers.popitem()
except KeyError:
# guard against race between while and popitem
pass
else:
reg._dispose_manager_and_mapper(manager)
reg._non_primary_mappers.clear()
reg._dependents.clear()
for dep in reg._dependencies:
dep._dependents.discard(reg)
reg._dependencies.clear()
# this wasn't done in the 1.3 clear_mappers() and in fact it
# was a bug, as it could cause configure_mappers() to invoke
# the "before_configured" event even though mappers had all been
# disposed.
reg._new_mappers = False
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
.. seealso::
:ref:`mapping_constructors`
:meth:`.InstanceEvents.load`
"""
fn.__sa_reconstructor__ = True
return fn
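# Illustrative usage sketch (assumed table and column names): a declaratively
# mapped class using the @reconstructor hook to rebuild derived state after a
# load, keeping the work conservative as advised above.
def _example_reconstructor_usage():
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.orm import declarative_base, reconstructor

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user_account"

        id = Column(Integer, primary_key=True)
        name = Column(String(50))

        def __init__(self, name):
            self.name = name
            self.init_on_load()

        @reconstructor
        def init_on_load(self):
            # runs with no arguments after the instance is loaded; only
            # scalar column attributes are reliably populated here
            self.display_name = (self.name or "").title()

    return User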
def validates(*names, **kw):
r"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop("include_removes", False)
include_backrefs = kw.pop("include_backrefs", True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs,
}
return fn
return wrap
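# Illustrative usage sketch (assumed table and column names): a simple
# attribute validator as described above; it raises ValueError to reject a
# value and otherwise returns the (possibly modified) value.
def _example_validates_usage():
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.orm import declarative_base, validates

    Base = declarative_base()

    class Account(Base):
        __tablename__ = "account"

        id = Column(Integer, primary_key=True)
        email = Column(String(100))

        @validates("email")
        def validate_email(self, key, value):
            if "@" not in value:
                raise ValueError("invalid email address: %r" % value)
            return value

    return Account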
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper:
instrumenting_mapper._check_configure()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = ("mapper",)
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r"
% (column.table.name, column.name, column.key, prop)
)
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..." % (column, self.mapper)
)
|
py | b40fd642e878a80a787ce82eb096c056bf123840 | #OSBS mining
from src import predict
from src import data
from src import neon_paths
from glob import glob
import pandas as pd
import geopandas as gpd
from src.start_cluster import start
from src import generate
from distributed import wait
import os
import re
import traceback
crop_sensor = True
def find_rgb_files(site, year, config):
tiles = glob(config["rgb_sensor_pool"], recursive=True)
tiles = [x for x in tiles if site in x]
tiles = [x for x in tiles if "/{}/".format(year) in x]
return tiles
def convert(rgb_path, hyperspectral_pool, year, savedir):
#convert .h5 hyperspec tile if needed
basename = os.path.basename(rgb_path)
geo_index = re.search(r"(\d+_\d+)_image", basename).group(1)
hyperspectral_h5_path = [x for x in hyperspectral_pool if geo_index in x]
hyperspectral_h5_path = [x for x in hyperspectral_h5_path if year in x][0]
tif_basename = os.path.splitext(os.path.basename(rgb_path))[0] + "_hyperspectral.tif"
tif_path = "{}/{}".format(savedir, tif_basename)
if not os.path.exists(tif_path):
tif_path = neon_paths.convert_h5(hyperspectral_h5_path, rgb_path, savedir)
return tif_path
config = data.read_config("config.yml")
tiles = find_rgb_files(site="OSBS", config=config, year="2019")
#generate HSI_tif data if needed.
hyperspectral_pool = glob(config["HSI_sensor_pool"], recursive=True)
rgb_pool = glob(config["rgb_sensor_pool"], recursive=True)
cpu_client = start(cpus=50)
tif_futures = cpu_client.map(convert, tiles, hyperspectral_pool=hyperspectral_pool, savedir = config["HSI_tif_dir"], year="2019")
wait(tif_futures)
species_model_path = "/blue/ewhite/b.weinstein/DeepTreeAttention/snapshots/0abd4a52fcb2453da44ae59740b4a9c8.pl"
dead_model_path = "/orange/idtrees-collab/DeepTreeAttention/Dead/snapshots/9192d967fa324eecb8cf2107e4673a00.pl"
hsi_tifs = []
for x in tif_futures:
try:
hsi_tifs.append(x.result())
except Exception:
# skip tiles whose hyperspectral conversion failed
pass
# the CPU cluster is only needed later for crop generation when crop_sensor is set
if not crop_sensor:
cpu_client.close()
gpu_client = start(gpus=1, mem_size="50GB")
#No daemonic dask children
config["workers"] = 0
futures = []
for x in hsi_tifs[:1]:
future = gpu_client.submit(predict.predict_tile, x, dead_model_path = dead_model_path, species_model_path=species_model_path, config=config)
futures.append(future)
wait(futures)
predictions = []
for future in futures:
try:
trees = future.result()
if not trees.empty:
predictions.append(trees)
except Exception as e:
print(e)
print(traceback.print_exc())
predictions = pd.concat(predictions)
predictions = gpd.GeoDataFrame(predictions, geometry="geometry")
predictions.to_file("results/OSBS_predictions.shp")
if crop_sensor:
#format for generate crops
predictions["taxonID"] = predictions["spatial_taxonID"]
predictions["plotID"] = None
predictions["box_id"] = None
predictions["siteID"] = None
annotations = generate.generate_crops(predictions, sensor_glob=config["HSI_sensor_pool"], savedir="/orange/idtrees-collab/DeepTreeAttention/prediction_crops/HSI/", rgb_glob=config["rgb_sensor_pool"], client=None, convert_h5=True, HSI_tif_dir=config["HSI_tif_dir"])
# no dask ``client`` variable is defined at this point, so run these serially with client=None
generate.generate_crops(predictions, sensor_glob=config["rgb_sensor_pool"], savedir="/orange/idtrees-collab/DeepTreeAttention/prediction_crops/RGB/", rgb_glob=config["rgb_sensor_pool"], client=None)
generate.generate_crops(predictions, sensor_glob=config["CHM_pool"], savedir="/orange/idtrees-collab/DeepTreeAttention/prediction_crops/CHM/", rgb_glob=config["rgb_sensor_pool"], client=None)
|
py | b40fd88449a48d395a353436aae232388d98afb8 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.appengine_admin_v1.services.authorized_domains import (
AuthorizedDomainsAsyncClient,
)
from google.cloud.appengine_admin_v1.services.authorized_domains import (
AuthorizedDomainsClient,
)
from google.cloud.appengine_admin_v1.services.authorized_domains import pagers
from google.cloud.appengine_admin_v1.services.authorized_domains import transports
from google.cloud.appengine_admin_v1.types import appengine
from google.cloud.appengine_admin_v1.types import domain
from google.oauth2 import service_account
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AuthorizedDomainsClient._get_default_mtls_endpoint(None) is None
assert (
AuthorizedDomainsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
AuthorizedDomainsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
AuthorizedDomainsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
AuthorizedDomainsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
AuthorizedDomainsClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [AuthorizedDomainsClient, AuthorizedDomainsAsyncClient,]
)
def test_authorized_domains_client_from_service_account_info(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "appengine.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [AuthorizedDomainsClient, AuthorizedDomainsAsyncClient,]
)
def test_authorized_domains_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "appengine.googleapis.com:443"
def test_authorized_domains_client_get_transport_class():
transport = AuthorizedDomainsClient.get_transport_class()
available_transports = [
transports.AuthorizedDomainsGrpcTransport,
]
assert transport in available_transports
transport = AuthorizedDomainsClient.get_transport_class("grpc")
assert transport == transports.AuthorizedDomainsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(AuthorizedDomainsClient, transports.AuthorizedDomainsGrpcTransport, "grpc"),
(
AuthorizedDomainsAsyncClient,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
AuthorizedDomainsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AuthorizedDomainsClient),
)
@mock.patch.object(
AuthorizedDomainsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AuthorizedDomainsAsyncClient),
)
def test_authorized_domains_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(AuthorizedDomainsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(AuthorizedDomainsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
AuthorizedDomainsClient,
transports.AuthorizedDomainsGrpcTransport,
"grpc",
"true",
),
(
AuthorizedDomainsAsyncClient,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
AuthorizedDomainsClient,
transports.AuthorizedDomainsGrpcTransport,
"grpc",
"false",
),
(
AuthorizedDomainsAsyncClient,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
AuthorizedDomainsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AuthorizedDomainsClient),
)
@mock.patch.object(
AuthorizedDomainsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AuthorizedDomainsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_authorized_domains_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(AuthorizedDomainsClient, transports.AuthorizedDomainsGrpcTransport, "grpc"),
(
AuthorizedDomainsAsyncClient,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_authorized_domains_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(AuthorizedDomainsClient, transports.AuthorizedDomainsGrpcTransport, "grpc"),
(
AuthorizedDomainsAsyncClient,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_authorized_domains_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_authorized_domains_client_client_options_from_dict():
with mock.patch(
"google.cloud.appengine_admin_v1.services.authorized_domains.transports.AuthorizedDomainsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = AuthorizedDomainsClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_list_authorized_domains(
transport: str = "grpc", request_type=appengine.ListAuthorizedDomainsRequest
):
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = appengine.ListAuthorizedDomainsResponse(
next_page_token="next_page_token_value",
)
response = client.list_authorized_domains(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListAuthorizedDomainsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAuthorizedDomainsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_authorized_domains_from_dict():
test_list_authorized_domains(request_type=dict)
def test_list_authorized_domains_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
client.list_authorized_domains()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListAuthorizedDomainsRequest()
@pytest.mark.asyncio
async def test_list_authorized_domains_async(
transport: str = "grpc_asyncio", request_type=appengine.ListAuthorizedDomainsRequest
):
client = AuthorizedDomainsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
appengine.ListAuthorizedDomainsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_authorized_domains(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == appengine.ListAuthorizedDomainsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAuthorizedDomainsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_authorized_domains_async_from_dict():
await test_list_authorized_domains_async(request_type=dict)
def test_list_authorized_domains_field_headers():
client = AuthorizedDomainsClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.ListAuthorizedDomainsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
call.return_value = appengine.ListAuthorizedDomainsResponse()
client.list_authorized_domains(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_authorized_domains_field_headers_async():
client = AuthorizedDomainsAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = appengine.ListAuthorizedDomainsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
appengine.ListAuthorizedDomainsResponse()
)
await client.list_authorized_domains(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_authorized_domains_pager():
client = AuthorizedDomainsClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListAuthorizedDomainsResponse(
domains=[
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
],
next_page_token="abc",
),
appengine.ListAuthorizedDomainsResponse(domains=[], next_page_token="def",),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(),], next_page_token="ghi",
),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(), domain.AuthorizedDomain(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_authorized_domains(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, domain.AuthorizedDomain) for i in results)
def test_list_authorized_domains_pages():
client = AuthorizedDomainsClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListAuthorizedDomainsResponse(
domains=[
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
],
next_page_token="abc",
),
appengine.ListAuthorizedDomainsResponse(domains=[], next_page_token="def",),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(),], next_page_token="ghi",
),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(), domain.AuthorizedDomain(),],
),
RuntimeError,
)
pages = list(client.list_authorized_domains(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_authorized_domains_async_pager():
client = AuthorizedDomainsAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListAuthorizedDomainsResponse(
domains=[
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
],
next_page_token="abc",
),
appengine.ListAuthorizedDomainsResponse(domains=[], next_page_token="def",),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(),], next_page_token="ghi",
),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(), domain.AuthorizedDomain(),],
),
RuntimeError,
)
async_pager = await client.list_authorized_domains(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, domain.AuthorizedDomain) for i in responses)
@pytest.mark.asyncio
async def test_list_authorized_domains_async_pages():
client = AuthorizedDomainsAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_authorized_domains),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
appengine.ListAuthorizedDomainsResponse(
domains=[
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
domain.AuthorizedDomain(),
],
next_page_token="abc",
),
appengine.ListAuthorizedDomainsResponse(domains=[], next_page_token="def",),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(),], next_page_token="ghi",
),
appengine.ListAuthorizedDomainsResponse(
domains=[domain.AuthorizedDomain(), domain.AuthorizedDomain(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_authorized_domains(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AuthorizedDomainsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AuthorizedDomainsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AuthorizedDomainsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.AuthorizedDomainsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AuthorizedDomainsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AuthorizedDomainsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = AuthorizedDomainsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AuthorizedDomainsGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AuthorizedDomainsGrpcAsyncIOTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.AuthorizedDomainsGrpcTransport,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AuthorizedDomainsClient(credentials=credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.AuthorizedDomainsGrpcTransport,)
def test_authorized_domains_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.AuthorizedDomainsTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_authorized_domains_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.appengine_admin_v1.services.authorized_domains.transports.AuthorizedDomainsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.AuthorizedDomainsTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("list_authorized_domains",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_authorized_domains_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.appengine_admin_v1.services.authorized_domains.transports.AuthorizedDomainsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.AuthorizedDomainsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
def test_authorized_domains_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.appengine_admin_v1.services.authorized_domains.transports.AuthorizedDomainsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.AuthorizedDomainsTransport()
adc.assert_called_once()
def test_authorized_domains_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
AuthorizedDomainsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id=None,
)
def test_authorized_domains_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.AuthorizedDomainsGrpcTransport(
host="squid.clam.whelk", quota_project_id="octopus"
)
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AuthorizedDomainsGrpcTransport,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
],
)
def test_authorized_domains_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_authorized_domains_host_no_port():
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="appengine.googleapis.com"
),
)
assert client.transport._host == "appengine.googleapis.com:443"
def test_authorized_domains_host_with_port():
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="appengine.googleapis.com:8000"
),
)
assert client.transport._host == "appengine.googleapis.com:8000"
def test_authorized_domains_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AuthorizedDomainsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_authorized_domains_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AuthorizedDomainsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AuthorizedDomainsGrpcTransport,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
],
)
def test_authorized_domains_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AuthorizedDomainsGrpcTransport,
transports.AuthorizedDomainsGrpcAsyncIOTransport,
],
)
def test_authorized_domains_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
"https://www.googleapis.com/auth/appengine.admin",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = AuthorizedDomainsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = AuthorizedDomainsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AuthorizedDomainsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = AuthorizedDomainsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = AuthorizedDomainsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AuthorizedDomainsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = AuthorizedDomainsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = AuthorizedDomainsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AuthorizedDomainsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = AuthorizedDomainsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = AuthorizedDomainsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AuthorizedDomainsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = AuthorizedDomainsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = AuthorizedDomainsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AuthorizedDomainsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.AuthorizedDomainsTransport, "_prep_wrapped_messages"
) as prep:
client = AuthorizedDomainsClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.AuthorizedDomainsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = AuthorizedDomainsClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
py | b40fd8d8de03bb319c7e61469b4ab7c3dae0995b | #! /usr/bin/env python
import _pickle as c_pickle, gzip
import numpy as np
from tqdm import tqdm
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn as nn
import sys
sys.path.append("..")
import utils
from utils import *
from train_utils import batchify_data, run_epoch, train_model, Flatten
def main():
# Load the dataset
num_classes = 10
X_train, y_train, X_test, y_test = get_MNIST_data()
# We need to reshape the data back into a 1x28x28 image
X_train = np.reshape(X_train, (X_train.shape[0], 1, 28, 28))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 28, 28))
# Split into train and dev
dev_split_index = int(9 * len(X_train) / 10)
X_dev = X_train[dev_split_index:]
y_dev = y_train[dev_split_index:]
X_train = X_train[:dev_split_index]
y_train = y_train[:dev_split_index]
permutation = np.array([i for i in range(len(X_train))])
np.random.shuffle(permutation)
X_train = [X_train[i] for i in permutation]
y_train = [y_train[i] for i in permutation]
# Split dataset into batches
batch_size = 32
train_batches = batchify_data(X_train, y_train, batch_size)
dev_batches = batchify_data(X_dev, y_dev, batch_size)
test_batches = batchify_data(X_test, y_test, batch_size)
#################################
## Model specification TODO
model = nn.Sequential(
nn.Conv2d(1, 32, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(32, 64, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
Flatten(),
nn.Linear(in_features=64*5*5, out_features=128),
nn.Dropout(p=0.5),
nn.Linear(128, 10)
)
##################################
train_model(train_batches, dev_batches, model, nesterov=True)
## Evaluate the model on test data
loss, accuracy = run_epoch(test_batches, model.eval(), None)
    print("Loss on test set: " + str(loss) + " Accuracy on test set: " + str(accuracy))
if __name__ == '__main__':
# Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321)
main()
|
py | b40fda9b5ba874d8bbe6d1f8171905abfa3d0f2b | import logging
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from cumulusci.cli.config import CliRuntime
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import CURRENT_TASK
from cumulusci.core.utils import import_global
from cumulusci.robotframework.utils import set_pdb_trace
from cumulusci.salesforce_api.utils import get_simple_salesforce_connection
from cumulusci.tasks.robotframework.robotframework import Robot
class CumulusCI(object):
""" Library for accessing CumulusCI for the local git project
This library allows Robot Framework tests to access credentials to a
Salesforce org created by CumulusCI, including Scratch Orgs. It also
    exposes the core logic of CumulusCI, including interactions with the
    Salesforce APIs and project-specific configuration such as custom
    and customized tasks and flows.
Initialization requires a single argument, the org name for the target
CumulusCI org. If running your tests via cci's robot task (recommended),
you can initialize the library in your tests taking advantage of the
variable set by the robot task:
| ``*** Settings ***``
|
| Library cumulusci.robotframework.CumulusCI ${ORG}
"""
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, org_name=None):
if not org_name:
org_name = "dev"
self.org_name = org_name
self._project_config = None
self._org = None
# Turn off info logging of all http requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
@property
def project_config(self):
if self._project_config is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's config
return CURRENT_TASK.stack[0].project_config
else:
logger.console("Initializing CumulusCI config\n")
self._project_config = CliRuntime().project_config
return self._project_config
def set_project_config(self, project_config):
logger.console("\n")
self._project_config = project_config
@property
def keychain(self):
return self.project_config.keychain
@property
def org(self):
if self._org is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's org
return CURRENT_TASK.stack[0].org_config
else:
self._org = self.keychain.get_org(self.org_name)
return self._org
@property
def sf(self):
return self._init_api()
@property
def tooling(self):
return self._init_api("tooling/")
def set_login_url(self):
""" Sets the LOGIN_URL variable in the suite scope which will
automatically log into the target Salesforce org.
        Typically, this is run during Suite Setup.
"""
BuiltIn().set_suite_variable("${LOGIN_URL}", self.org.start_url)
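    # Illustrative Robot Framework usage of the keyword above (the keyword name
    # follows Robot Framework's standard method-name-to-keyword mapping; the
    # suite snippet itself is hypothetical):
    #
    # | *** Settings ***
    # | Suite Setup    Set Login Url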
def get_org_info(self):
""" Returns a dictionary of the org information for the current target
Salesforce org
"""
return self.org.config
def login_url(self, org=None):
""" Returns the login url which will automatically log into the target
Salesforce org. By default, the org_name passed to the library
constructor is used but this can be overridden with the org option
to log into a different org.
"""
if org is None:
org = self.org
else:
org = self.keychain.get_org(org)
return org.start_url
def get_community_info(self, community_name, key=None, force_refresh=False):
"""This keyword uses the Salesforce API to get information about a community.
        This keyword requires the exact community name as its first argument.
- If no key is given, all of the information returned by the API will be
returned by this keyword in the form of a dictionary
- If a key is given, only the value for that key will be returned.
Some of the supported keys include name, siteUrl, and
loginUrl. For a comprehensive list see the
[https://developer.salesforce.com/docs/atlas.en-us.chatterapi.meta/chatterapi/connect_responses_community.htm|API documentation],
or call this keyword without the key argument and examine the
results.
An API call will be made the first time this keyword is used, and
the return values will be cached. Subsequent calls will not call
the API unless the requested community name is not in the cached
results, or unless the force_refresh parameter is set to True.
"""
community_info = self.org.get_community_info(
community_name, force_refresh=force_refresh
)
if key is None:
return community_info
else:
if key not in community_info:
raise Exception("Invalid key '{}'".format(key))
return community_info[key]
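    # Illustrative usage of the keyword above from a Robot Framework test
    # ("Example Community" is a made-up community name):
    #
    # | ${info}=    Get Community Info    Example Community
    # | ${url}=     Get Community Info    Example Community    key=siteUrl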
def get_namespace_prefix(self, package=None):
""" Returns the namespace prefix (including __) for the specified package name.
(Defaults to project__package__name_managed from the current project config.)
Returns an empty string if the package is not installed as a managed package.
"""
result = ""
if package is None:
package = self.project_config.project__package__name_managed
packages = self.tooling.query(
"SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
"FROM InstalledSubscriberPackage"
)
match = [
p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
]
if match:
result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
return result
def run_task(self, task_name, **options):
""" Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
"""
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config)
def run_task_class(self, class_path, **options):
""" Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
        most useful in cases where a test needs task logic that is
        unique to the test and thus not worth making into a named
        task for the project.
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
"""
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config)
def _init_api(self, base_url=None):
client = get_simple_salesforce_connection(self.project_config, self.org)
if base_url is not None:
client.base_url += base_url
return client
def _init_task(self, class_path, options, task_config):
task_class = import_global(class_path)
task_config = self._parse_task_options(options, task_class, task_config)
return task_class, task_config
def _parse_task_options(self, options, task_class, task_config):
if "options" not in task_config.config:
task_config.config["options"] = {}
# Parse options and add to task config
if options:
for name, value in options.items():
# Validate the option
if name not in task_class.task_options:
raise TaskOptionsError(
'Option "{}" is not available for task {}'.format(
name, task_class
)
)
# Override the option in the task config
task_config.config["options"][name] = value
return task_config
def _run_task(self, task_class, task_config):
task = task_class(self.project_config, task_config, org_config=self.org)
task()
return task.return_values
def debug(self):
"""Pauses execution and enters the Python debugger."""
set_pdb_trace()
|
py | b40fdada18d7cd59027562748688af2c772cb13a | # Update the code and upload the package to pypi
# 1. python ./setup.py sdist --format=gztar
# 2. twine upload dist/simple_tensorflow_serving-1.0.0.tar.gz
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
name="simple_tensorflow_serving",
version="0.3.1",
author="tobe",
author_email="[email protected]",
url="https://github.com/tobegit3hub/simple_tensorflow_serving",
#install_requires=["tensorflow>=1.0.0"],
description=
"The simpler and easy-to-use serving service for TensorFlow models",
packages=[
"simple_tensorflow_serving", "simple_tensorflow_serving.gen_client"
],
#package_data={
# "simple_tensorflow_serving/static": ['simple_tensorflow_serving/templates/*.html', 'simple_tensorflow_serving/static/*']
#},
include_package_data=True,
zip_safe=False,
entry_points={
"console_scripts": [
"simple_tensorflow_serving=simple_tensorflow_serving.server:main",
],
})
|
py | b40fdba6a9b63708ac62f648e0726e4147c8ba5a | import os
import pika
import json
import sys
input_queue = sys.argv[1]
output_queue = sys.argv[2]
final_queue = sys.argv[3]
conn = pika.BlockingConnection(
pika.ConnectionParameters('localhost'))
channel = conn.channel()
channel.queue_declare(queue=input_queue)
channel.queue_declare(queue=output_queue)
channel.queue_declare(queue=final_queue)
last_message = {}
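# Worker logic (see store_function below): every JSON message read from
# input_queue is appended to an archive file and forwarded to output_queue.
# When a message with val == 'FINAL' arrives, either the last stored message
# is published to final_queue (if finalop == 'storage') or the FINAL marker is
# forwarded to output_queue; the input queue is then deleted and the process exits.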
def store_function(ch, method, properties, body):
global last_message
connection = pika.BlockingConnection(
pika.ConnectionParameters('localhost'))
chan = connection.channel()
filepath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'interface', 'archive_files', input_queue + '_storage.txt')
print(body)
body_json = json.loads(body)
body_json['filepath'] = filepath
val = body_json['val']
final_op = body_json['finalop']
if val == 'FINAL':
if final_op == 'storage':
chan.basic_publish(
exchange='', routing_key=final_queue,
body=json.dumps(last_message))
else:
chan.basic_publish(
exchange='', routing_key=output_queue,
body=json.dumps(body_json))
chan.queue_delete(queue=input_queue)
connection.close()
sys.exit()
return
with open(filepath, 'a') as fp:
fp.write(json.dumps(body_json))
if final_op == 'storage':
last_message = body_json
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
chan = connection.channel()
chan.basic_publish(
exchange='',
routing_key=output_queue,
body=json.dumps(body_json))
connection.close()
channel.basic_consume(queue=input_queue,
auto_ack=True,
on_message_callback=store_function)
print(input_queue + '\t ----> storage() ----> ' + output_queue)
channel.start_consuming()
|
py | b40fdc48bedc14358bd144eabd28f0541644d502 | #!/usr/bin/python
import requests
import xml.etree.ElementTree as ET
LIMIT = 4
def get_length():
return LIMIT
def get_headlines():
r = requests.get("http://yle.fi/uutiset/rss/paauutiset.rss")
root = ET.fromstring(r.text)
data = []
    for item in root.iter("item"):
        data.append(item[0].text)
        # Stop once LIMIT headlines have been collected.
        if len(data) == LIMIT:
            break
return data |
py | b40fdc6aa6cdac487b75c1f0f378b62d3299ebb6 | from setuptools import setup, find_packages
setup(
name="himawaripy",
version="2.0.1",
url="http://labs.boramalper.org/himawaripy",
author="Mert Bora Alper",
author_email="[email protected]",
license="MIT",
description="Set near-realtime picture of Earth as your desktop background",
    long_description="himawaripy is a Python 3 script that fetches near-realtime (10 minutes delayed) picture of Earth "
    "as it's taken by Himawari 8 (ひまわり8号) and sets it as your desktop background.",
install_requires=["appdirs", "pillow", "python-dateutil"],
packages=find_packages(),
entry_points={"console_scripts": ["himawaripy=himawaripy.__main__:main"]},
)
|
py | b40fdde9295048d2d6291d8cc36ad30b3698b3ba | import csv
import logging
import multiprocessing
from eva_cttv_pipeline.clinvar_xml_utils import ClinVarTrait
from eva_cttv_pipeline.trait_mapping.output import output_trait
from eva_cttv_pipeline.trait_mapping.oxo import get_oxo_results
from eva_cttv_pipeline.trait_mapping.oxo import uris_to_oxo_format
from eva_cttv_pipeline.trait_mapping.trait import Trait
from eva_cttv_pipeline.trait_mapping.trait_names_parsing import parse_trait_names
from eva_cttv_pipeline.trait_mapping.zooma import get_zooma_results
logger = logging.getLogger(__package__)
def get_uris_for_oxo(zooma_result_list: list) -> set:
"""
    For a list of Zooma mappings, return the set of URIs for the high confidence mappings
    in that list.
:param zooma_result_list: List with elements of class ZoomaResult
:return: set of uris from high confidence Zooma mappings, for which to query OxO
"""
uri_set = set()
for mapping in zooma_result_list:
# Only use high confidence Zooma mappings for querying OxO
if mapping.confidence.lower() == "high":
uri_set.update([entry.uri for entry in mapping.mapping_list])
return uri_set
def process_trait(trait: Trait, filters: dict, zooma_host: str, oxo_target_list: list, oxo_distance: int) -> Trait:
"""
Process a single trait. Find any mappings in Zooma. If there are no high confidence Zooma
mappings that are in EFO then query OxO with any high confidence mappings not in EFO.
:param trait: The trait to be processed.
:param filters: A dictionary of filters to use for querying Zooma.
:param zooma_host: A string with the hostname to use for querying Zooma
:param oxo_target_list: A list of strings, each being an OxO ID for an ontology. Used to specify
which ontologies should be queried using OxO.
:param oxo_distance: int specifying the maximum number of steps to use to query OxO. i.e. OxO's
"distance" parameter.
:return: The original trait after querying Zooma and possibly OxO, with any results found.
"""
logger.debug('Processing trait {}'.format(trait.name))
trait.zooma_result_list = get_zooma_results(trait.name, filters, zooma_host)
trait.process_zooma_results()
if (trait.is_finished
or len(trait.zooma_result_list) == 0
or any([entry.is_current
for mapping in trait.zooma_result_list
for entry in mapping.mapping_list])):
return trait
uris_for_oxo_set = get_uris_for_oxo(trait.zooma_result_list)
oxo_input_id_list = uris_to_oxo_format(uris_for_oxo_set)
if len(oxo_input_id_list) == 0:
return trait
trait.oxo_result_list = get_oxo_results(oxo_input_id_list, oxo_target_list, oxo_distance)
if not trait.oxo_result_list:
logger.debug('No OxO mapping for trait {}'.format(trait.name))
trait.process_oxo_mappings()
return trait
def main(input_filepath, output_mappings_filepath, output_curation_filepath, filters, zooma_host, oxo_target_list,
oxo_distance):
logger.info('Started parsing trait names')
trait_list = parse_trait_names(input_filepath)
logger.info("Loaded {} trait names".format(len(trait_list)))
with open(output_mappings_filepath, "w", newline='') as mapping_file, \
open(output_curation_filepath, "wt") as curation_file:
mapping_writer = csv.writer(mapping_file, delimiter="\t")
mapping_writer.writerow(["#clinvar_trait_name", "uri", "label"])
curation_writer = csv.writer(curation_file, delimiter="\t")
logger.info('Processing trait names in parallel')
        trait_process_pool = multiprocessing.Pool(processes=24)
        # apply_async submits every trait to the worker pool without blocking,
        # so the traits are processed in parallel; the results are collected
        # in order once all of them have been submitted.
        async_results = [
            trait_process_pool.apply_async(
                process_trait,
                args=(trait, filters, zooma_host, oxo_target_list, oxo_distance)
            )
            for trait in trait_list
        ]
        processed_trait_list = [result.get() for result in async_results]
logger.info('Writing output with the processed traits')
for trait in processed_trait_list:
# Remove non-specific trait names which should never be output
if trait.name.lower() not in ClinVarTrait.NONSPECIFIC_TRAITS:
output_trait(trait, mapping_writer, curation_writer)
logger.info('Finished processing trait names')
|
py | b40fddf8569982281a73a857b38bf2b06dcf7b09 | # pylint: disable = unused-import, missing-docstring
from pymedphys._trf.decode.trf2pandas import trf2pandas as read
from pymedphys._trf.manage.identify import identify_logfile as identify
|
py | b40fde869cbc616469011f7869edcb085bf9022b | # coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.classifier.gae_models."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import types
from core.domain import classifier_domain
from core.platform import models
from core.tests import test_utils
import feconf
from typing import List, cast
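# MYPY stays False at runtime, so the imports in the "if MYPY:" block below are
# never executed when the tests run; they only exist for the type checker,
# which treats the MYPY flag as true.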
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import classifier_models
(base_models, classifier_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.classifier])
class ClassifierTrainingJobModelUnitTests(test_utils.GenericTestBase):
"""Test the ClassifierTrainingJobModel class."""
def test_get_deletion_policy(self) -> None:
self.assertEqual(
classifier_models.ClassifierTrainingJobModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_create_and_get_new_training_job_runs_successfully(self) -> None:
next_scheduled_check_time = datetime.datetime.utcnow()
job_id = classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id1', 1,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_NEW, 1)
training_job = (
classifier_models.ClassifierTrainingJobModel.get(job_id)
)
# Ruling out the possibility of None for mypy type checking.
assert training_job is not None
self.assertEqual(training_job.algorithm_id, 'TextClassifier')
self.assertEqual(training_job.interaction_id, 'TextInput')
self.assertEqual(training_job.exp_id, 'exp_id1')
self.assertEqual(training_job.exp_version, 1)
self.assertEqual(training_job.state_name, 'state_name2')
self.assertEqual(
training_job.status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(
training_job.training_data,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}])
self.assertEqual(training_job.algorithm_version, 1)
def test_query_new_and_pending_training_jobs(self) -> None:
next_scheduled_check_time = datetime.datetime.utcnow()
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id1', 1,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_NEW, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id2', 2,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_PENDING, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id3', 3,
next_scheduled_check_time + datetime.timedelta(
minutes=feconf.CLASSIFIER_JOB_TTL_MINS),
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_PENDING, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id4', 4,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_FAILED, 1)
training_jobs, offset = (
classifier_models.ClassifierTrainingJobModel.
query_new_and_pending_training_jobs(0))
self.assertEqual(len(training_jobs), 2)
self.assertEqual(training_jobs[0].algorithm_id, 'TextClassifier')
self.assertEqual(training_jobs[0].interaction_id, 'TextInput')
self.assertEqual(training_jobs[0].exp_id, 'exp_id1')
self.assertEqual(training_jobs[0].exp_version, 1)
self.assertEqual(
training_jobs[0].next_scheduled_check_time,
next_scheduled_check_time)
self.assertEqual(training_jobs[0].state_name, 'state_name2')
self.assertEqual(
training_jobs[0].status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(
training_jobs[0].training_data,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}])
self.assertEqual(
training_jobs[1].status,
feconf.TRAINING_JOB_STATUS_PENDING)
self.assertEqual(offset, 2)
def test_query_new_and_pending_training_jobs_with_non_zero_offset(
self
) -> None:
with self.swap(
classifier_models, 'NEW_AND_PENDING_TRAINING_JOBS_FETCH_LIMIT', 2):
next_scheduled_check_time = (
datetime.datetime.utcnow() - datetime.timedelta(minutes=1))
# Creating 6 jobs out of which 4 will be fetched in steps.
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id01', 1,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_NEW, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id02', 2,
next_scheduled_check_time + datetime.timedelta(seconds=1),
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_PENDING, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id03', 3,
next_scheduled_check_time + datetime.timedelta(
minutes=feconf.CLASSIFIER_JOB_TTL_MINS),
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_PENDING, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id04', 4,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_FAILED, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id05', 1,
next_scheduled_check_time + datetime.timedelta(seconds=2),
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_NEW, 1)
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id06', 1,
next_scheduled_check_time + datetime.timedelta(seconds=3),
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_PENDING, 1)
training_jobs, offset = (
classifier_models.ClassifierTrainingJobModel.
query_new_and_pending_training_jobs(0))
self.assertEqual(len(training_jobs), 2)
self.assertEqual(training_jobs[0].algorithm_id, 'TextClassifier')
self.assertEqual(training_jobs[0].interaction_id, 'TextInput')
self.assertEqual(training_jobs[0].exp_id, 'exp_id01')
self.assertEqual(training_jobs[0].exp_version, 1)
self.assertEqual(
training_jobs[0].next_scheduled_check_time,
next_scheduled_check_time)
self.assertEqual(training_jobs[0].state_name, 'state_name2')
self.assertEqual(
training_jobs[0].status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(
training_jobs[0].training_data,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}])
self.assertEqual(
training_jobs[1].status,
feconf.TRAINING_JOB_STATUS_PENDING)
self.assertEqual(offset, 2)
training_jobs, offset = (
classifier_models.ClassifierTrainingJobModel.
query_new_and_pending_training_jobs(offset))
self.assertEqual(len(training_jobs), 2)
self.assertEqual(training_jobs[0].algorithm_id, 'TextClassifier')
self.assertEqual(training_jobs[0].interaction_id, 'TextInput')
self.assertEqual(training_jobs[0].exp_id, 'exp_id05')
self.assertEqual(training_jobs[0].exp_version, 1)
self.assertEqual(
training_jobs[0].next_scheduled_check_time,
next_scheduled_check_time + datetime.timedelta(seconds=2))
self.assertEqual(training_jobs[0].state_name, 'state_name2')
self.assertEqual(
training_jobs[0].status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(
training_jobs[0].training_data,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}])
self.assertEqual(
training_jobs[1].status,
feconf.TRAINING_JOB_STATUS_PENDING)
self.assertEqual(offset, 4)
def test_create_multi_jobs(self) -> None:
next_scheduled_check_time = datetime.datetime.utcnow()
job_dicts_list = []
job_dicts_list.append({
'exp_id': u'1',
'exp_version': 1,
'next_scheduled_check_time': next_scheduled_check_time,
'state_name': 'Home',
'interaction_id': 'TextInput',
'algorithm_id': feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'],
'training_data': [],
'status': feconf.TRAINING_JOB_STATUS_NEW,
'algorithm_version': 1
})
job_dicts_list.append({
'exp_id': u'1',
'exp_version': 2,
'next_scheduled_check_time': next_scheduled_check_time,
'state_name': 'Home',
'interaction_id': 'TextInput',
'algorithm_id': feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'],
'training_data': [],
'status': feconf.TRAINING_JOB_STATUS_NEW,
'algorithm_version': 1
})
job_ids = classifier_models.ClassifierTrainingJobModel.create_multi(
job_dicts_list)
self.assertEqual(len(job_ids), 2)
training_job1 = (
classifier_models.ClassifierTrainingJobModel.get(job_ids[0])
)
# Ruling out the possibility of None for mypy type checking.
assert training_job1 is not None
self.assertEqual(
training_job1.algorithm_id,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'])
self.assertEqual(
training_job1.interaction_id,
'TextInput')
self.assertEqual(training_job1.exp_id, '1')
self.assertEqual(training_job1.exp_version, 1)
self.assertEqual(training_job1.training_data, [])
self.assertEqual(training_job1.state_name, 'Home')
self.assertEqual(
training_job1.status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(training_job1.algorithm_version, 1)
training_job2 = (
classifier_models.ClassifierTrainingJobModel.get(job_ids[1])
)
# Ruling out the possibility of None for mypy type checking.
assert training_job2 is not None
self.assertEqual(
training_job2.algorithm_id,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'])
self.assertEqual(
training_job2.interaction_id,
'TextInput')
self.assertEqual(training_job2.exp_id, '1')
self.assertEqual(training_job2.exp_version, 2)
self.assertEqual(training_job2.training_data, [])
self.assertEqual(training_job2.state_name, 'Home')
self.assertEqual(
training_job2.status,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertEqual(training_job2.algorithm_version, 1)
def test_raise_exception_by_mocking_collision(self) -> None:
next_scheduled_check_time = datetime.datetime.utcnow()
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'The id generator for ClassifierTrainingJobModel is '
'producing too many collisions.'
):
# Swap dependent method get_by_id to simulate collision every time.
with self.swap(
classifier_models.ClassifierTrainingJobModel, 'get_by_id',
types.MethodType(
lambda x, y: True,
classifier_models.ClassifierTrainingJobModel)):
classifier_models.ClassifierTrainingJobModel.create(
'TextClassifier', 'TextInput', 'exp_id1', 1,
next_scheduled_check_time,
[{'answer_group_index': 1, 'answers': ['a1', 'a2']}],
'state_name2', feconf.TRAINING_JOB_STATUS_NEW, 1)
class StateTrainingJobsMappingModelUnitTests(test_utils.GenericTestBase):
"""Tests for the StateTrainingJobsMappingModel class."""
def test_get_deletion_policy(self) -> None:
self.assertEqual(
classifier_models.StateTrainingJobsMappingModel
.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_create_and_get_new_mapping_runs_successfully(self) -> None:
mapping_id = (
classifier_models.StateTrainingJobsMappingModel.create(
'exp_id1', 2, 'state_name4', {'algorithm_id': 'job_id4'}))
mapping = classifier_models.StateTrainingJobsMappingModel.get(
mapping_id)
# Ruling out the possibility of None for mypy type checking.
assert mapping is not None
self.assertEqual(mapping.exp_id, 'exp_id1')
self.assertEqual(mapping.exp_version, 2)
self.assertEqual(mapping.state_name, 'state_name4')
self.assertEqual(
mapping.algorithm_ids_to_job_ids, {'algorithm_id': 'job_id4'})
# Test that exception is raised when creating mapping with same id.
with self.assertRaisesRegexp(Exception, ( # type: ignore[no-untyped-call]
'A model with the same ID already exists.')):
mapping_id = (
classifier_models.StateTrainingJobsMappingModel.create(
'exp_id1', 2, 'state_name4', {'algorithm_id': 'job_id4'}))
# Test that state names with unicode characters get saved correctly.
state_name1 = 'Klüft'
mapping_id = (
classifier_models.StateTrainingJobsMappingModel.create(
'exp_id1', 2, state_name1, {'algorithm_id': 'job_id4'}))
classifier_models.StateTrainingJobsMappingModel.get(mapping_id)
self.assertEqual(mapping_id, 'exp_id1.2.%s' % state_name1)
# Ruling out the possibility of None for mypy type checking.
assert mapping is not None
state_name2 = 'टेक्स्ट'
mapping_id = (
classifier_models.StateTrainingJobsMappingModel.create(
'exp_id1', 2, state_name2, {'algorithm_id': 'job_id4'}))
classifier_models.StateTrainingJobsMappingModel.get(mapping_id)
# Ruling out the possibility of None for mypy type checking.
assert mapping is not None
self.assertEqual(mapping_id, 'exp_id1.2.%s' % state_name2)
def test_get_model_from_exploration_attributes(self) -> None:
exp_id = 'exp_id1'
exp_version = 1
state_name = 'state_name1'
job_id = 'job_id1'
classifier_models.StateTrainingJobsMappingModel.create(
exp_id, exp_version, state_name, {'algorithm_id': job_id})
mappings = (
classifier_models.StateTrainingJobsMappingModel.get_models(
exp_id, exp_version, [state_name]))
# Ruling out the possibility of None for mypy type checking.
assert mappings[0] is not None
self.assertEqual(len(mappings), 1)
self.assertEqual(mappings[0].exp_id, exp_id)
self.assertEqual(mappings[0].exp_version, 1)
self.assertEqual(mappings[0].state_name, state_name)
self.assertDictEqual(
mappings[0].algorithm_ids_to_job_ids, {'algorithm_id': job_id})
def test_create_multi_mappings(self) -> None:
state_training_jobs_mappings: List[
classifier_domain.StateTrainingJobsMapping] = []
state_training_jobs_mappings.append(
classifier_domain.StateTrainingJobsMapping( # type: ignore[no-untyped-call]
u'1', 1, 'Home', {'algorithm_id': 'job_id1'}))
state_training_jobs_mappings.append(
classifier_domain.StateTrainingJobsMapping( # type: ignore[no-untyped-call]
u'1', 2, 'Home', {'algorithm_id': 'job_id2'}))
state_training_jobs_mappings_model = cast(
List[classifier_models.StateTrainingJobsMappingModel],
state_training_jobs_mappings)
mapping_ids = (
classifier_models.StateTrainingJobsMappingModel.create_multi(
state_training_jobs_mappings_model))
self.assertEqual(len(mapping_ids), 2)
mapping1 = (
classifier_models.StateTrainingJobsMappingModel.get(
mapping_ids[0]))
# Ruling out the possibility of None for mypy type checking.
assert mapping1 is not None
self.assertEqual(mapping1.exp_id, '1')
self.assertEqual(mapping1.exp_version, 1)
self.assertDictEqual(
mapping1.algorithm_ids_to_job_ids, {'algorithm_id': 'job_id1'})
self.assertEqual(mapping1.state_name, 'Home')
mapping2 = (
classifier_models.StateTrainingJobsMappingModel.get(
mapping_ids[1]))
# Ruling out the possibility of None for mypy type checking.
assert mapping2 is not None
self.assertEqual(mapping2.exp_id, '1')
self.assertEqual(mapping2.exp_version, 2)
self.assertEqual(
mapping2.algorithm_ids_to_job_ids, {'algorithm_id': 'job_id2'})
self.assertEqual(mapping2.state_name, 'Home')
|
py | b40fde9fc8326ece1cec6332da2e7ba9bf048e97 | # PYTRIS Copyright (c) 2017 Jason Kim All Rights Reserved.
import pygame
import operator
from mino import *
from random import *
from pygame.locals import *
from tkinter import *
# Define
block_size = 17 # Height, width of single block
width = 10 # Board width
height = 20 # Board height
board_width = 800
board_height = 450
block_size = int(board_height*0.045)
framerate = 30 # Bigger -> Slower
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
pygame.display.set_caption("OM TETRIS")
class ui_variables:
# Fonts
font_path = "./assets/fonts/OpenSans-Light.ttf"
font_path_b = "./assets/fonts/OpenSans-Bold.ttf"
font_path_i = "./assets/fonts/Inconsolata/Inconsolata.otf"
h1 = pygame.font.Font(font_path_b, 80)
h2 = pygame.font.Font(font_path_b, 30)
h4 = pygame.font.Font(font_path_b, 20)
h5 = pygame.font.Font(font_path_b, 13)
h6 = pygame.font.Font(font_path_b, 10)
h1_b = pygame.font.Font(font_path_b, 50)
h2_b = pygame.font.Font(font_path_b, 30)
h2_i = pygame.font.Font(font_path_i, 30)
h5_i = pygame.font.Font(font_path_i, 13)
# Sounds
pygame.mixer.music.load("assets/sounds/SFX_BattleMusic.wav")
pygame.mixer.music.set_volume(0.3)
intro_sound = pygame.mixer.Sound("assets/sounds/SFX_Intro.wav")
fall_sound = pygame.mixer.Sound("assets/sounds/SFX_Fall.wav")
break_sound = pygame.mixer.Sound("assets/sounds/SFX_Break.wav")
click_sound = pygame.mixer.Sound("assets/sounds/SFX_ButtonUp.wav")
move_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceMoveLR.wav")
drop_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceHardDrop.wav")
single_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearSingle.wav")
double_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearDouble.wav")
triple_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearTriple.wav")
tetris_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialTetris.wav")
LevelUp_sound = pygame.mixer.Sound("assets/sounds/SFX_LevelUp.wav")
GameOver_sound = pygame.mixer.Sound("assets/sounds/SFX_GameOver.wav")
# Combo graphic
combos = []
large_combos = []
    combo_ring = pygame.image.load("assets/Combo/4combo ring.png") # graphic for clearing 4 blocks at once
combo_4ring = pygame.transform.smoothscale(combo_ring, (200, 100))
for i in range(1, 11) :
combos.append(pygame.image.load("assets/Combo/"+str(i)+"combo.png"))
large_combos.append(pygame.transform.smoothscale(combos[i-1], (150, 200)))
combos_sound = []
for i in range(1, 10) :
combos_sound.append(pygame.mixer.Sound("assets/sounds/SFX_"+str(i+2)+"Combo.wav"))
# Background colors
black = (10, 10, 10) #rgb(10, 10, 10)
    white = (0, 153, 153) #rgb(255, 255, 255) # changed to teal
    real_white = (255, 255, 255) #rgb(255, 255, 255)
    grey_1 = (70, 130, 180) #rgb(26, 26, 26) border changed to blue
grey_2 = (221, 221,221) #rgb(35, 35, 35)
grey_3 = (000, 000, 139) #rgb(55, 55, 55)
    bright_yellow = (255,217,102) # bright yellow
# Tetrimino colors
cyan = (10, 255, 226) #rgb(69, 206, 204) # I
blue = (64, 105, 255) #rgb(64, 111, 249) # J
orange = (245, 144, 12) #rgb(253, 189, 53) # L
yellow = (225, 242, 41) #rgb(246, 227, 90) # O
green = (22, 181, 64) #rgb(98, 190, 68) # S
pink = (242, 41, 195) #rgb(242, 64, 235) # T
red = (204, 22, 22) #rgb(225, 13, 27) # Z
t_color = [grey_2, cyan, blue, orange, yellow, green, pink, red, grey_3]
cyan_image = 'assets/block_images/cyan.png'
blue_image = 'assets/block_images/blue.png'
orange_image = 'assets/block_images/orange.png'
yellow_image = 'assets/block_images/yellow.png'
green_image = 'assets/block_images/green.png'
pink_image = 'assets/block_images/purple.png'
red_image = 'assets/block_images/red.png'
ghost_image = 'assets/block_images/ghost.png'
table_image = 'assets/block_images/background.png'
t_block = [table_image,cyan_image,blue_image,orange_image,yellow_image,green_image,pink_image,red_image,ghost_image]
class button():
def __init__(self, x, y, width, height, id, img = ''):
self.x = x
self.y = y
self.width =width
self.height = height
self.id = id
self.image = img
def draw(self, win , outline = None):
if outline:
draw_image(screen, self.image, self.x, self.y, self.width, self.height)
def isOver(self,pos):
if pos[0] > self.x-(self.width/2) and pos[0] < self.x + (self.width/2):
if pos[1] > self.y-(self.height/2) and pos[1] < self.y + (self.height/2):
return True
return False
start_image = 'assets/images/start.png'
help_image = 'assets/images/help.png'
start_button = button(board_width*0.5, board_height*0.5,146,43,1,start_image)
background_image = 'assets/vector/Background.png'
single_button_image = 'assets/vector/single_button.png'
clicked_single_button_image = 'assets/vector/clicked_single_button.png'
pvp_button_image = 'assets/vector/pvp_button.png'
clicked_pvp_button_image = 'assets/vector/clicked_pvp_button.png'
help_button_image = 'assets/vector/help_button.png'
clicked_help_button_image = 'assets/vector/clicked_help_button.png'
quit_button_image = 'assets/vector/quit_button.png'
clicked_quit_button_image = 'assets/vector/clicked_quit_button.png'
leaderboard_vector = 'assets/vector/leaderboard_vector.png'
clicked_leaderboard_vector = 'assets/vector/clicked_leader_vector.png'
setting_vector = 'assets/vector/setting_vector.png'
clicked_setting_vector = 'assets/vector/clicked_setting_vector.png'
pause_board_image= 'assets/vector/pause_board.png'
leader_board_image= 'assets/vector/leader_board.png'
setting_board_image = 'assets/vector/setting_board.png'
gameover_board_image = 'assets/vector/gameover_board.png'
resume_button_image = 'assets/vector/resume_button.png'
clicked_resume_button_image = 'assets/vector/clicked_resume_button.png'
restart_button_image = 'assets/vector/restart_button.png'
clicked_restart_button_image = 'assets/vector/clicked_restart_button.png'
setting_button_image = 'assets/vector/setting_button.png'
clicked_setting_button_image = 'assets/vector/clicked_setting_button.png'
back_button_image = 'assets/vector/back_button.png'
clicked_back_button_image = 'assets/vector/clicked_back_button.png'
volume_vector = 'assets/vector/volume_vector.png'
clicked_volume_vector = 'assets/vector/clicked_volume_vector.png'
keyboard_vector = 'assets/vector/keyboard_vector.png'
clicked_keyboard_vector = 'assets/vector/clicked_keyboard_vector.png'
screen_vector = 'assets/vector/screen_vector.png'
clicked_screen_vector = 'assets/vector/clicked_screen_vector.png'
menu_button_image = 'assets/vector/menu_button.png'
clicked_menu_button_image = 'assets/vector/clicked_menu_button.png'
ok_button_image = 'assets/vector/ok_button.png'
clicked_ok_button_image = 'assets/vector/clicked_ok_button.png'
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
volume = 1.0
tetris3 = pygame.image.load("assets/images/tetris3.png")
tetris4 = pygame.transform.smoothscale(tetris3, (200, 150))
def draw_image(window,img_path, x,y,width,height):
x= x-(width/2)
y= y-(height/2)
image= pygame.image.load(img_path)
image = pygame.transform.smoothscale(image,(width,height))
window.blit(image,(x,y))
# Draw block
def draw_block(x, y, color):
pygame.draw.rect(
screen,
color,
Rect(x, y, block_size, block_size)
)
pygame.draw.rect(
screen,
ui_variables.grey_1,
Rect(x, y, block_size, block_size),
1
)
def draw_block_image(x,y,image):
draw_image(screen,image,x,y,block_size,block_size)
# Draw game screen
def draw_board(next, hold, score, level, goal):
sidebar_width = int(board_width*0.5312)
# Draw sidebar
pygame.draw.rect(
screen,
ui_variables.white,
Rect(sidebar_width, 0, int(board_width*0.1875), board_height)
)
# Draw next mino
grid_n = tetrimino.mino_map[next - 1][0]
for i in range(4):
for j in range(4):
dx = int(board_width*0.045)+sidebar_width + block_size * j
dy = int(board_height*0.3743) + block_size * i
if grid_n[i][j] != 0:
##draw_block(dx,dy,ui_variables.t_color[grid_n[i][j]])
draw_block_image(dx,dy,ui_variables.t_block[grid_n[i][j]])
# Draw hold mino
grid_h = tetrimino.mino_map[hold - 1][0]
if hold_mino != -1:
for i in range(4):
for j in range(4):
dx = int(board_width*0.045) + sidebar_width + block_size * j
dy = int(board_height*0.1336) + block_size * i
if grid_h[i][j] != 0:
##draw_block(dx,dy,ui_variables.t_color[grid_h[i][j]])
draw_block_image(dx,dy,ui_variables.t_block[grid_h[i][j]])
# Set max score
if score > 999999:
score = 999999
# Draw texts
text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.real_white)
text_next = ui_variables.h5.render("NEXT", 1, ui_variables.real_white)
text_score = ui_variables.h5.render("SCORE", 1, ui_variables.real_white)
score_value = ui_variables.h4.render(str(score), 1, ui_variables.real_white)
text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.real_white)
level_value = ui_variables.h4.render(str(level), 1, ui_variables.real_white)
text_combo = ui_variables.h5.render("COMBO", 1, ui_variables.real_white)
combo_value = ui_variables.h4.render(str(combo_count), 1, ui_variables.real_white)
# Place texts
screen.blit(text_hold, (int(board_width*0.045)+sidebar_width, int(board_height*0.0374)))
screen.blit(text_next, (int(board_width*0.045)+sidebar_width , int(board_height*0.2780)))
screen.blit(text_score, (int(board_width*0.045) + sidebar_width, int(board_height*0.5187)))
screen.blit(score_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.5614)))
screen.blit(text_level, (int(board_width*0.045) + sidebar_width, int(board_height*0.6791)))
screen.blit(level_value, (int(board_width*0.055) + sidebar_width , int(board_height*0.7219)))
screen.blit(text_combo, (int(board_width*0.045) + sidebar_width , int(board_height*0.8395)))
screen.blit(combo_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.8823)))
# Draw board
for x in range(width):
for y in range(height):
dx = int(board_width*0.25) + block_size * x
dy = int(board_height*0.055) + block_size * y
## draw_block(dx, dy, ui_variables.t_color[matrix[x][y + 1]])
draw_block_image(dx,dy,ui_variables.t_block[matrix[x][y + 1]])
def draw_1Pboard(next, hold, score, level, goal):
sidebar_width = int(board_width*0.2867)
# Draw sidebar
pygame.draw.rect(
screen,
ui_variables.white,
Rect(sidebar_width, 0, int(board_width*0.1875), board_height)
)
# Draw next mino
grid_n = tetrimino.mino_map[next - 1][0]
for i in range(4):
for j in range(4):
dx = int(board_width*0.045)+sidebar_width + block_size * j
dy = int(board_height*0.3743) + block_size * i
if grid_n[i][j] != 0:
## draw_block(dx,dy,ui_variables.t_color[grid_n[i][j]])
draw_block_image(dx,dy,ui_variables.t_block[grid_n[i][j]])
# Draw hold mino
grid_h = tetrimino.mino_map[hold - 1][0]
if hold_mino != -1:
for i in range(4):
for j in range(4):
dx = int(board_width*0.045) + sidebar_width + block_size * j
dy = int(board_height*0.1336) + block_size * i
if grid_h[i][j] != 0:
draw_block(dx,dy,ui_variables.t_color[grid_h[i][j]])
# Set max score
if score > 999999:
score = 999999
# Draw texts
text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.real_white)
text_next = ui_variables.h5.render("NEXT", 1, ui_variables.real_white)
text_score = ui_variables.h5.render("SCORE", 1, ui_variables.real_white)
score_value = ui_variables.h4.render(str(score), 1, ui_variables.real_white)
text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.real_white)
level_value = ui_variables.h4.render(str(level), 1, ui_variables.real_white)
text_combo = ui_variables.h5.render("COMBO", 1, ui_variables.real_white)
combo_value = ui_variables.h4.render(str(combo_count), 1, ui_variables.real_white)
# Place texts
screen.blit(text_hold, (int(board_width*0.045)+sidebar_width, int(board_height*0.0374)))
screen.blit(text_next, (int(board_width*0.045)+sidebar_width , int(board_height*0.2780)))
screen.blit(text_score, (int(board_width*0.045) + sidebar_width, int(board_height*0.5187)))
screen.blit(score_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.5614)))
screen.blit(text_level, (int(board_width*0.045) + sidebar_width, int(board_height*0.6791)))
screen.blit(level_value, (int(board_width*0.055) + sidebar_width , int(board_height*0.7219)))
screen.blit(text_combo, (int(board_width*0.045) + sidebar_width , int(board_height*0.8395)))
screen.blit(combo_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.8823)))
# Draw board
for x in range(width):
for y in range(height):
dx = int(board_height*0.055) + block_size * x
dy = int(board_height*0.055) + block_size * y
draw_block(dx, dy, ui_variables.t_color[matrix[x][y + 1]])
def draw_2Pboard(next, hold, score, level, goal):
sidebar_width = int(board_width*0.7867)
# Draw sidebar
pygame.draw.rect(
screen,
ui_variables.white,
Rect(sidebar_width, 0, int(board_width*0.1875), board_height)
)
# Draw next mino
grid_n = tetrimino.mino_map[next - 1][0]
    for i in range(4): # draw.rect only the non-zero (true) cells among the 16 grid cells
for j in range(4):
dx = int(board_width*0.045)+sidebar_width + block_size * j
dy = int(board_height*0.3743) + block_size * i
if grid_n[i][j] != 0:
                draw_block(dx,dy,ui_variables.t_color[grid_n[i][j]]) # improves readability of the next block's shape.
# Draw hold mino
grid_h = tetrimino.mino_map[hold - 1][0]
if hold_mino != -1:
for i in range(4):
for j in range(4):
dx = int(board_width*0.045) + sidebar_width + block_size * j
dy = int(board_height*0.1336) + block_size * i
if grid_h[i][j] != 0:
draw_block(dx,dy,ui_variables.t_color[grid_h[i][j]])
# Set max score
if score > 999999:
score = 999999
text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.real_white)
text_next = ui_variables.h5.render("NEXT", 1, ui_variables.real_white)
text_score = ui_variables.h5.render("SCORE", 1, ui_variables.real_white)
score_value = ui_variables.h4.render(str(score), 1, ui_variables.real_white)
text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.real_white)
level_value = ui_variables.h4.render(str(level), 1, ui_variables.real_white)
text_combo = ui_variables.h5.render("COMBO", 1, ui_variables.real_white)
combo_value = ui_variables.h4.render(str(combo_count), 1, ui_variables.real_white)
# Place texts
screen.blit(text_hold, (int(board_width*0.045)+sidebar_width, int(board_height*0.0374)))
screen.blit(text_next, (int(board_width*0.045)+sidebar_width , int(board_height*0.2780)))
screen.blit(text_score, (int(board_width*0.045) + sidebar_width, int(board_height*0.5187)))
screen.blit(score_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.5614)))
screen.blit(text_level, (int(board_width*0.045) + sidebar_width, int(board_height*0.6791)))
screen.blit(level_value, (int(board_width*0.055) + sidebar_width , int(board_height*0.7219)))
screen.blit(text_combo, (int(board_width*0.045) + sidebar_width , int(board_height*0.8395)))
screen.blit(combo_value, (int(board_width*0.055) + sidebar_width, int(board_height*0.8823)))
# Draw board
for x in range(width):
for y in range(height):
dx = int(board_width*0.5) + block_size * x
dy = int(board_height*0.055) + block_size * y
draw_block(dx, dy, ui_variables.t_color[matrix_2P[x][y + 1]])
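# Cell values used in matrix / matrix_2P: 0 is an empty cell, 1-7 index the
# tetrimino colors in ui_variables.t_color / t_block, and 8 marks the ghost
# (landing preview) cells handled by the draw/erase functions below.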
# Draw a tetrimino
def draw_mino(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
tx, ty = x, y
while not is_bottom(tx, ty, mino, r):
ty += 1
# Draw ghost
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[tx + j][ty + i] = 8
# Draw mino
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[x + j][y + i] = grid[i][j]
def draw_mino_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
tx, ty = x, y
while not is_bottom_2P(tx, ty, mino, r):
ty += 1
# Draw ghost
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix_2P[tx + j][ty + i] = 8
# Draw mino
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix_2P[x + j][y + i] = grid[i][j]
# Erase a tetrimino
def erase_mino(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
# Erase ghost
for j in range(21):
for i in range(10):
if matrix[i][j] == 8:
matrix[i][j] = 0
# Erase mino
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix[x + j][y + i] = 0
def erase_mino_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
# Erase ghost
for j in range(21):
for i in range(10):
if matrix_2P[i][j] == 8:
matrix_2P[i][j] = 0
# Erase mino
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
matrix_2P[x + j][y + i] = 0
# Returns true if mino is at bottom
def is_bottom(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (y + i + 1) > 20:
return True
elif matrix[x + j][y + i + 1] != 0 and matrix[x + j][y + i + 1] != 8:
return True
return False
def is_bottom_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (y + i + 1) > 20:
return True
elif matrix_2P[x + j][y + i + 1] != 0 and matrix_2P[x + j][y + i + 1] != 8:
return True
return False
# Returns true if mino is at the left edge
def is_leftedge(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j - 1) < 0:
return True
elif matrix[x + j - 1][y + i] != 0:
return True
return False
def is_leftedge_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j - 1) < 0:
return True
elif matrix_2P[x + j - 1][y + i] != 0:
return True
return False
# Returns true if mino is at the right edge
def is_rightedge(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j + 1) > 9:
return True
elif matrix[x + j + 1][y + i] != 0:
return True
return False
def is_rightedge_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j + 1) > 9:
return True
elif matrix_2P[x + j + 1][y + i] != 0:
return True
return False
# Returns true if turning right is possible
def is_turnable_r(x, y, mino, r):
if r != 3:
grid = tetrimino.mino_map[mino - 1][r + 1]
else:
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 20:
return False
elif matrix[x + j][y + i] != 0:
return False
return True
def is_turnable_r_2P(x, y, mino, r):
if r != 3:
grid = tetrimino.mino_map[mino - 1][r + 1]
else:
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 20:
return False
elif matrix_2P[x + j][y + i] != 0:
return False
return True
# Returns true if turning left is possible
def is_turnable_l(x, y, mino, r):
if r != 0:
grid = tetrimino.mino_map[mino - 1][r - 1]
else:
grid = tetrimino.mino_map[mino - 1][3]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 20:
return False
elif matrix[x + j][y + i] != 0:
return False
return True
def is_turnable_l_2P(x, y, mino, r):
    if r != 0:
        grid = tetrimino.mino_map[mino - 1][r - 1]
    else:
        grid = tetrimino.mino_map[mino - 1][3]
    for i in range(4):
        for j in range(4):
            if grid[i][j] != 0:
                if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 20:
                    return False
                elif matrix_2P[x + j][y + i] != 0:
                    return False
    return True
# Returns true if new block is drawable
def is_stackable(mino):
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
#print(grid[i][j], matrix[3 + j][i])
if grid[i][j] != 0 and matrix[3 + j][i] != 0:
return False
return True
def is_stackable_2P(mino):
grid = tetrimino.mino_map[mino - 1][0]
for i in range(4):
for j in range(4):
#print(grid[i][j], matrix[3 + j][i])
if grid[i][j] != 0 and matrix_2P[3 + j][i] != 0:
return False
return True
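# is_stackable / is_stackable_2P check whether the 4x4 spawn area (column
# offset 3 at the top of the board, matching the initial dx, dy = 3, 0) is
# still free for a newly spawned mino.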
def draw_multiboard(next_1P,hold_1P,next_2P,hold_2P,score,level,goal):
screen.fill(ui_variables.real_white)
draw_1Pboard(next_1P,hold_1P,score,level,goal)
draw_2Pboard(next_2P,hold_2P,score,level,goal)
def set_vol(val):
    # Tk passes the slider value as a string; update the module-level volume
    # so the rest of the game picks up the new setting.
    global volume
    volume = int(val)/100
    print(volume)
    ui_variables.click_sound.set_volume(volume)
# Initial values
blink = False
start = False
pause = False
done = False
game_over = False
leader_board = False
setting = False
pvp = False
help = False
combo_count = 0
score = 0
level = 1
goal = level * 5
bottom_count = 0
hard_drop = False
dx, dy = 3, 0 # Minos location status
rotation = 0 # Minos rotation status
mino = randint(1, 7) # Current mino
next_mino = randint(1, 7) # Next mino
hold = False # Hold status
hold_mino = -1 # Holded mino
hold_mino_2P = -1
bottom_count_2P = 0
hard_drop_2P = False
hold_2P = False
next_mino_2P = randint(1,7)
mino_2P = randint(1,7)
rotation_2P = 0
dx_2P , dy_2P = 3, 0
name_location = 0
name = [65, 65, 65]
previous_time = pygame.time.get_ticks()
current_time = pygame.time.get_ticks()
pause_time = pygame.time.get_ticks()
with open('leaderboard.txt') as f:
    lines = [line.rstrip('\n') for line in f]
leaders = {'AAA': 0, 'BBB': 0, 'CCC': 0}
for i in lines:
leaders[i.split(' ')[0]] = int(i.split(' ')[1])
leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True)
matrix = [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
matrix_2P = [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
###########################################################
# Loop Start
###########################################################
volume = 1.0
ui_variables.click_sound.set_volume(volume)
pygame.mixer.init()
ui_variables.intro_sound.set_volume(0.1)
ui_variables.intro_sound.play()
game_status = ''
ui_variables.break_sound.set_volume(0.2)
while not done:
# Pause screen
ui_variables.click_sound.set_volume(volume)
if setting :
draw_image(screen,background_image,board_width*0.5,board_height*0.5,board_width,board_height)
single_button.draw(screen,(0,0,0))
pvp_button.draw(screen,(0,0,0))
help_button.draw(screen,(0,0,0))
quit_button.draw(screen,(0,0,0))
setting_icon.draw(screen,(0,0,0))
leaderboard_icon.draw(screen,(0,0,0))
if start:
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
if pvp:
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
draw_image(screen,setting_board_image, board_width*0.5,board_height*0.5, int(board_height*1.3), board_height)
keyboard_icon.draw(screen,(0,0,0))
screen_icon.draw(screen,(0,0,0))
volume_icon.draw(screen,(0,0,0))
back_button.draw(screen,(0,0,0))
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
pause_text = ui_variables.h2_b.render("PAUSED", 1, ui_variables.real_white)
pause_start = ui_variables.h5.render("Press esc to continue", 1, ui_variables.real_white)
pygame.display.update()
elif event.type == pygame.MOUSEMOTION:
if back_button.isOver(pos):
back_button.image = clicked_back_button_image
else :
back_button.image = back_button_image
if volume_icon.isOver(pos):
volume_icon.image = clicked_volume_vector
else :
volume_icon.image = volume_vector
if keyboard_icon.isOver(pos):
keyboard_icon.image = clicked_keyboard_vector
else :
keyboard_icon.image = keyboard_vector
if screen_icon.isOver(pos):
screen_icon.image = clicked_screen_vector
else :
screen_icon.image = screen_vector
pygame.display.update()
elif event.type == pygame.MOUSEBUTTONDOWN:
if back_button.isOver(pos):
ui_variables.click_sound.play()
setting = False
if volume_icon.isOver(pos):
ui_variables.click_sound.play()
root = Tk()
root.geometry('300x300')
root.title("Volume Setting")
text = Label(root,text='Setting Volume!')
text.pack()
scale = Scale(root, from_ =100, to =0, orient = VERTICAL , command = set_vol )
scale.set(50)
scale.pack()
root.mainloop()
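                    # The Tk volume window blocks here until it is closed; the slider's set_vol callback (defined elsewhere) applies the chosen volume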
if keyboard_icon.isOver(pos):
ui_variables.click_sound.play()
if screen_icon.isOver(pos):
ui_variables.click_sound.play()
elif event.type == VIDEORESIZE:
                board_width = event.w
                board_height = event.h
                block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
elif pause:
pygame.mixer.music.pause()
#screen.fill(ui_variables.real_white)
#draw_board(next_mino, hold_mino, score, level, goal)
if start:
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
if pvp:
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
draw_image(screen ,pause_board_image,board_width*0.5,board_height*0.5, int(board_height*0.7428), board_height)
resume_button.draw(screen,(0,0,0))
restart_button.draw(screen,(0,0,0))
setting_button.draw(screen,(0,0,0))
pause_quit_button.draw(screen,(0,0,0))
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
pause_text = ui_variables.h2_b.render("PAUSED", 1, ui_variables.real_white)
pause_start = ui_variables.h5.render("Press esc to continue", 1, ui_variables.real_white)
pygame.display.update()
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation)
if event.key == K_ESCAPE:
pause = False
ui_variables.click_sound.play()
pygame.mixer.music.unpause()
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.type == pygame.MOUSEMOTION:
if resume_button.isOver(pos):
resume_button.image = clicked_resume_button_image
else :
resume_button.image = resume_button_image
if restart_button.isOver(pos):
restart_button.image = clicked_restart_button_image
else :
restart_button.image = restart_button_image
if setting_button.isOver(pos):
setting_button.image = clicked_setting_button_image
else :
setting_button.image = setting_button_image
if pause_quit_button.isOver(pos):
pause_quit_button.image = clicked_quit_button_image
else :
pause_quit_button.image = quit_button_image
pygame.display.update()
elif event.type == pygame.MOUSEBUTTONDOWN:
if pause_quit_button.isOver(pos):
ui_variables.click_sound.play()
done=True
if setting_button.isOver(pos):
ui_variables.click_sound.play()
setting = True
if restart_button.isOver(pos):
ui_variables.click_sound.play()
hold = False
dx, dy = 3, 0
rotation = 0
mino = randint(1, 7)
next_mino = randint(1, 7)
hold_mino = -1
framerate = 30
score = 0
level = 1
combo_count = 0
goal = level * 5
bottom_count = 0
hard_drop = False
name_location = 0
name = [65, 65, 65]
matrix = [[0 for y in range(height + 1)] for x in range(width)]
pause = False
start = False
hold_mino_2P = -1 #
bottom_count_2P = 0 #
hard_drop_2P = False #
hold_2P = False #
next_mino_2P = randint(1,7) #
mino_2P = randint(1,7) #
rotation_2P = 0 #
dx_2P , dy_2P = 3, 0 #
matrix_2P = [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
if pvp :
pvp=False
if resume_button.isOver(pos):
pygame.mixer.music.unpause()
pause = False
ui_variables.click_sound.play()
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
elif help :
draw_image(screen,background_image,board_width*0.5,board_height*0.5,board_width,board_height)
single_button.draw(screen,(0,0,0))
pvp_button.draw(screen,(0,0,0))
help_button.draw(screen,(0,0,0))
quit_button.draw(screen,(0,0,0))
setting_icon.draw(screen,(0,0,0))
leaderboard_icon.draw(screen,(0,0,0))
draw_image(screen ,'assets/vector/help_board.png', board_width*0.5,board_height*0.5, int(board_height*1.3), board_height)
draw_image(screen ,'assets/vector/help_contents.png', board_width*0.5,board_height*0.5, int(board_height*1.1), int(board_height*0.55))
#draw_image(screen ,'assets/images/help_image.png', board_width*0.15, 0, int(board_width*0.7), board_height)
back_button.draw(screen,(0,0,0))
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
pygame.display.update()
elif event.type == pygame.MOUSEMOTION:
if back_button.isOver(pos):
back_button.image = clicked_back_button_image
else :
back_button.image = back_button_image
pygame.display.update()
elif event.type == pygame.MOUSEBUTTONDOWN:
if back_button.isOver(pos):
ui_variables.click_sound.play()
help=False
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
    # Leaderboard screen
elif leader_board :
draw_image(screen,background_image,board_width*0.5,board_height*0.5,board_width,board_height)
single_button.draw(screen,(0,0,0))
pvp_button.draw(screen,(0,0,0))
help_button.draw(screen,(0,0,0))
quit_button.draw(screen,(0,0,0))
setting_icon.draw(screen,(0,0,0))
leaderboard_icon.draw(screen,(0,0,0))
draw_image(screen,leader_board_image, board_width*0.5,board_height*0.5, int(board_height*1.3), board_height)
back_button.draw(screen,(0,0,0))
leader_1 = ui_variables.h1_b.render('1st ' + leaders[0][0] + ' ' + str(leaders[0][1]), 1, ui_variables.grey_1)
leader_2 = ui_variables.h1_b.render('2nd ' + leaders[1][0] + ' ' + str(leaders[1][1]), 1, ui_variables.grey_1)
leader_3 = ui_variables.h1_b.render('3rd ' + leaders[2][0] + ' ' + str(leaders[2][1]), 1, ui_variables.grey_1)
screen.blit(leader_1, (board_width*0.3, board_height*0.15))
screen.blit(leader_2, (board_width*0.3, board_height*0.35))
screen.blit(leader_3, (board_width*0.3, board_height*0.55))
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
pause_text = ui_variables.h2_b.render("PAUSED", 1, ui_variables.real_white)
pause_start = ui_variables.h5.render("Press esc to continue", 1, ui_variables.real_white)
pygame.display.update()
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation)
if event.key == K_ESCAPE:
pause = False
ui_variables.click_sound.play()
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.type == pygame.MOUSEMOTION:
if back_button.isOver(pos):
back_button.image = clicked_back_button_image
else :
back_button.image = back_button_image
pygame.display.update()
elif event.type == pygame.MOUSEBUTTONDOWN:
if back_button.isOver(pos):
ui_variables.click_sound.play()
leader_board=False
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
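    # Game screen (single player)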
elif start:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
# Set speed
if not game_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate * 1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate * 20)
# Draw a mino
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
current_time = pygame.time.get_ticks()
# Erase a mino
if not game_over:
erase_mino(dx, dy, mino, rotation)
# Move mino down
if not is_bottom(dx, dy, mino, rotation):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
if is_stackable(next_mino):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
ui_variables.GameOver_sound.play()
start = False
game_status = 'start'
game_over = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Erase line
erase_count = 0
combo_value = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
combo_value += 1
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
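                        # Scoring: points scale with the number of cleared lines, the level, and the running combo count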
if erase_count >= 1 :
previous_time = current_time
combo_count += 1
if erase_count == 1:
ui_variables.break_sound.play()
ui_variables.single_sound.play()
score += 50 * level * erase_count + combo_count
elif erase_count == 2:
ui_variables.break_sound.play()
ui_variables.double_sound.play()
ui_variables.double_sound.play()
score += 150 * level * erase_count + 2 * combo_count
elif erase_count == 3:
ui_variables.break_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
score += 350 * level * erase_count + 3 * combo_count
elif erase_count == 4:
ui_variables.break_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
score += 1000 * level * erase_count + 4 * combo_count
screen.blit(ui_variables.combo_4ring, (250, 160))
for i in range(1, 11) :
                                if combo_count == i : # combo image for 1-10
screen.blit(ui_variables.large_combos[i-1], (board_width*0.27, board_height*0.3)) # blits the combo number
                                elif combo_count > 10 : # combo image for 11 and above
screen.blit(tetris4, (board_width*0.27, board_height*0.3)) # blits the combo number
for i in range(1, 10) :
                                if combo_count == i+2 : # combo sounds for 3-11
ui_variables.combos_sound[i-1].play()
if current_time-previous_time > 5000:
previous_time = current_time
combo_count = 0
                        # if no blocks were erased, combo -1
# if is_bottom(dx, dy, mino, rotation) :
# if erase_count == 0 :
# combo_count -= 1
# if combo_count < 0:
# combo_count = 0
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
ui_variables.LevelUp_sound.play()
ui_variables.LevelUp_sound.play()
goal += level * 5
framerate = int(framerate * 0.8)
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation)
if event.key == K_ESCAPE:
ui_variables.click_sound.play()
pause = True
# Hard drop
elif event.key == K_SPACE:
ui_variables.fall_sound.play()
ui_variables.drop_sound.play()
while not is_bottom(dx, dy, mino, rotation):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
# Hold
elif event.key == K_LSHIFT or event.key == K_q:
if hold == False:
ui_variables.move_sound.play()
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
# Turn right
elif event.key == K_UP or event.key == K_w:
if is_turnable_r(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation):
ui_variables.move_sound.play()
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
# Turn left
elif event.key == K_z or event.key == K_LCTRL:
if is_turnable_l(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation -= 1
# Kick
elif is_turnable_l(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation -= 1
elif is_turnable_l(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation -= 1
elif is_turnable_l(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation -= 1
elif is_turnable_l(dx, dy - 2, mino, rotation):
ui_variables.move_sound.play()
dy -= 2
                        rotation -= 1
elif is_turnable_l(dx + 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 2
                        rotation -= 1
elif is_turnable_l(dx - 2, dy, mino, rotation):
ui_variables.move_sound.play()
                        dx -= 2
                        rotation -= 1
if rotation == -1:
rotation = 3
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
# Move left
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
# Move right
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
draw_mino(dx, dy, mino, rotation)
screen.fill(ui_variables.real_white)
draw_board(next_mino, hold_mino, score, level, goal)
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
pygame.display.update()
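    # PvP screen: same flow as the single-player loop, duplicated with *_2P state for the second board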
elif pvp :
for event in pygame.event.get():
#event.key = pygame.key.get_pressed()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
# Set speed
if not game_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate * 1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate * 20)
# Draw a mino
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P,dy_2P,mino_2P,rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Erase a mino
if not game_over:
erase_mino(dx, dy, mino, rotation)
erase_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
# Move mino down
if not is_bottom(dx, dy, mino, rotation):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation)
if is_stackable(next_mino):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
                                else: # game over when the next mino cannot be stacked
ui_variables.GameOver_sound.play()
pvp = False
game_status= 'pvp'
game_over = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Move mino down
if not is_bottom_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
dy_2P += 1
# Create new mino
else:
if hard_drop_2P or bottom_count_2P == 6:
hard_drop_2P = False
bottom_count_2P = 0
score += 10 * level
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
if is_stackable_2P(next_mino_2P):
mino_2P = next_mino_2P
next_mino_2P = randint(1, 7)
dx_2P, dy_2P = 3, 0
rotation_2P = 0
hold_2P = False
                                else: # game over when the next mino cannot be stacked
ui_variables.GameOver_sound.play()
pvp = False
                                    game_status = 'pvp'
game_over = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count_2P += 1
# Erase line
                        # combo count
erase_count = 0
erase_count_2P = 0
combo_value = 0
sent = 0
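                        # 'sent' tallies attack lines for the opponent; sending them does not appear to be implemented in this loop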
#attack_stack = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
#attack_stack += 1
k = j
combo_value += 1
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
for j in range(21):
is_full = True
for i in range(10):
if matrix_2P[i][j] == 0:
is_full = False
if is_full:
erase_count_2P += 1
k = j
combo_value += 1
while k > 0:
for i in range(10):
matrix_2P[i][k] = matrix_2P[i][k - 1]
k -= 1
                        # if no blocks were erased, combo -1
#if erase_count == 0 :
#combo_count -= 1
#if combo_count < 0:
#combo_count = 0
if erase_count >= 1:
combo_count += 1
if erase_count == 1:
ui_variables.break_sound.play()
ui_variables.single_sound.play()
score += 50 * level * erase_count + combo_count
sent += 1
elif erase_count == 2:
ui_variables.break_sound.play()
ui_variables.double_sound.play()
ui_variables.double_sound.play()
score += 150 * level * erase_count + 2 * combo_count
sent += 2
elif erase_count == 3:
ui_variables.break_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
ui_variables.triple_sound.play()
score += 350 * level * erase_count + 3 * combo_count
sent += 3
elif erase_count == 4:
ui_variables.break_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
ui_variables.tetris_sound.play()
score += 1000 * level * erase_count + 4 * combo_count
sent += 4
screen.blit(ui_variables.combo_4ring, (250,160))
for i in range(1, 11) :
                                if combo_count == i : # combo image for 1-10
screen.blit(ui_variables.large_combos[i-1], (124, 190)) # blits the combo number
                                elif combo_count > 10 : # combo image for 11 and above
screen.blit(tetris4, (100, 190)) # blits the combo number
for i in range(1, 10) :
                                if combo_count == i+2 : # combo sounds for 3-11
ui_variables.combos_sound[i-1].play()
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
ui_variables.LevelUp_sound.play()
ui_variables.LevelUp_sound.play()
goal += level * 5
framerate = int(framerate * 0.8)
            elif event.type == KEYDOWN: ## important
erase_mino(dx, dy, mino, rotation)
erase_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
if event.key == K_ESCAPE:
ui_variables.click_sound.play()
pause = True
# Hard drop
elif event.key == K_SPACE:
ui_variables.fall_sound.play()
ui_variables.drop_sound.play()
while not is_bottom(dx, dy, mino, rotation):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino(dx, dy, mino, rotation)
#draw_mino_2P(dx_2P,dy_2P,mino_2P,rotation_2P)
#draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_f:
ui_variables.fall_sound.play()
ui_variables.drop_sound.play()
while not is_bottom_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
dy_2P += 1
hard_drop_2P = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
#draw_mino(dx, dy, mino, rotation)
#draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Hold
elif event.key == K_LSHIFT :
if hold == False:
ui_variables.move_sound.play()
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_q :
if hold_2P == False:
ui_variables.move_sound.play()
if hold_mino_2P == -1:
hold_mino_2P = mino_2P
mino_2P = next_mino_2P
next_mino_2P = randint(1, 7)
else:
hold_mino_2P, mino_2P = mino_2P, hold_mino_2P
dx_2P, dy_2P = 3, 0
rotation_2P = 0
hold_2P = True
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Turn right
elif event.key == K_UP :
if is_turnable_r(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation):
ui_variables.move_sound.play()
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_x or event.key == K_w:
if is_turnable_r(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
rotation_2P += 1
# Kick
elif is_turnable_r(dx_2P, dy_2P - 1, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dy_2P -= 1
rotation_2P += 1
elif is_turnable_r(dx_2P + 1, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P += 1
rotation_2P += 1
elif is_turnable_r(dx_2P - 1, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P -= 1
rotation_2P += 1
elif is_turnable_r(dx_2P, dy_2P - 2, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dy_2P -= 2
rotation_2P += 1
elif is_turnable_r(dx_2P + 2, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P += 2
rotation_2P += 1
elif is_turnable_r(dx_2P - 2, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
dx_2P -= 2
rotation_2P += 1
if rotation_2P == 4:
rotation_2P = 0
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Turn left
elif event.key == K_z or event.key == K_LCTRL:
if is_turnable_l(dx, dy, mino, rotation):
ui_variables.move_sound.play()
rotation -= 1
# Kick
elif is_turnable_l(dx, dy - 1, mino, rotation):
ui_variables.move_sound.play()
dy -= 1
rotation -= 1
elif is_turnable_l(dx + 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 1
rotation -= 1
elif is_turnable_l(dx - 1, dy, mino, rotation):
ui_variables.move_sound.play()
dx -= 1
rotation -= 1
elif is_turnable_l(dx, dy - 2, mino, rotation):
ui_variables.move_sound.play()
dy -= 2
                        rotation -= 1
elif is_turnable_l(dx + 2, dy, mino, rotation):
ui_variables.move_sound.play()
dx += 2
                        rotation -= 1
elif is_turnable_l(dx - 2, dy, mino, rotation):
ui_variables.move_sound.play()
                        dx -= 2
                        rotation -= 1
if rotation == -1:
rotation = 3
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move left
elif event.key == K_LEFT : # key = pygame.key.get_pressed()
if not is_leftedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx -= 1
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move right
elif event.key == K_RIGHT :
if not is_rightedge(dx, dy, mino, rotation):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx += 1
draw_mino(dx, dy, mino, rotation)
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.key == K_a : # key = pygame.key.get_pressed()
if not is_leftedge_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx_2P -= 1
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
# Move right
elif event.key == K_d :
if not is_rightedge_2P(dx_2P, dy_2P, mino_2P, rotation_2P):
ui_variables.move_sound.play()
keys_pressed = pygame.key.get_pressed()
pygame.time.set_timer(pygame.KEYUP, framerate * 3)
dx_2P += 1
draw_mino_2P(dx_2P, dy_2P, mino_2P, rotation_2P)
draw_mino(dx, dy, mino, rotation)
draw_multiboard(next_mino,hold_mino,next_mino_2P,hold_mino_2P,score,level,goal)
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
#if any(movement_keys.values()):
# movement_keys_timer += clock.tick(50)
pygame.display.update()
# Game over screen
elif game_over:
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.mixer.music.stop()
pygame.time.set_timer(pygame.USEREVENT, 300)
draw_image(screen ,gameover_board_image, board_width*0.5,board_height*0.5, int(board_height*0.7428), board_height)
menu_button.draw(screen,(0,0,0))
restart_button.draw(screen,(0,0,0))
ok_button.draw(screen,(0,0,0))
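                # Render the three-letter initials (A-Z) with a blinking underscore under the selected slot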
name_1 = ui_variables.h1_b.render(chr(name[0]), 1, ui_variables.white)
name_2 = ui_variables.h1_b.render(chr(name[1]), 1, ui_variables.white)
name_3 = ui_variables.h1_b.render(chr(name[2]), 1, ui_variables.white)
underbar_1 = ui_variables.h1_b.render("_", 1, ui_variables.white)
underbar_2 = ui_variables.h1_b.render("_", 1, ui_variables.white)
underbar_3 = ui_variables.h1_b.render("_", 1, ui_variables.white)
screen.blit(name_1, (int(board_width*0.434), int(board_height*0.55)))
screen.blit(name_2, (int(board_width*0.494), int(board_height*0.55)))
screen.blit(name_3, (int(board_width*0.545), int(board_height*0.55)))
if blink:
blink = False
else:
if name_location == 0:
screen.blit(underbar_1, ((int(board_width*0.437), int(board_height*0.56))))
elif name_location == 1:
screen.blit(underbar_2, ((int(board_width*0.497), int(board_height*0.56))))
elif name_location == 2:
screen.blit(underbar_3, ((int(board_width*0.557), int(board_height*0.56))))
blink = True
pygame.display.update()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
ui_variables.click_sound.play()
outfile = open('leaderboard.txt','a')
outfile.write(chr(name[0]) + chr(name[1]) + chr(name[2]) + ' ' + str(score) + '\n')
outfile.close()
game_over = False
hold = False #
dx, dy = 3, 0 #
rotation = 0 #
mino = randint(1, 7) #
next_mino = randint(1, 7) #
hold_mino = -1 #
framerate = 30
score = 0
combo_count = 0
level = 1
goal = level * 5
bottom_count = 0 #
hard_drop = False #
name_location = 0
name = [65, 65, 65]
matrix = [[0 for y in range(height + 1)] for x in range(width)]
hold_mino_2P = -1 #
bottom_count_2P = 0 #
hard_drop_2P = False #
hold_2P = False #
next_mino_2P = randint(1,7) #
mino_2P = randint(1,7) #
rotation_2P = 0 #
dx_2P , dy_2P = 3, 0 #
matrix_2P = [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
                    with open('leaderboard.txt') as f:
                        lines = [line.rstrip('\n') for line in f]
leaders = {'AAA': 0, 'BBB': 0, 'CCC': 0}
for i in lines:
leaders[i.split(' ')[0]] = int(i.split(' ')[1])
leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True)
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_RIGHT:
if name_location != 2:
name_location += 1
else:
name_location = 0
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_LEFT:
if name_location != 0:
name_location -= 1
else:
name_location = 2
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_UP:
ui_variables.click_sound.play()
if name[name_location] != 90:
name[name_location] += 1
else:
name[name_location] = 65
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_DOWN:
ui_variables.click_sound.play()
if name[name_location] != 65:
name[name_location] -= 1
else:
name[name_location] = 90
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.type == pygame.MOUSEMOTION:
                if menu_button.isOver(pos):
menu_button.image = clicked_menu_button_image
else :
menu_button.image = menu_button_image
if restart_button.isOver(pos):
restart_button.image = clicked_restart_button_image
else :
restart_button.image = restart_button_image
if ok_button.isOver(pos):
ok_button.image = clicked_ok_button_image
else :
ok_button.image = ok_button_image
pygame.display.update()
elif event.type == pygame.MOUSEBUTTONDOWN:
if ok_button.isOver(pos):
ui_variables.click_sound.play()
outfile = open('leaderboard.txt','a')
outfile.write(chr(name[0]) + chr(name[1]) + chr(name[2]) + ' ' + str(score) + '\n')
outfile.close()
game_over = False
hold = False #
dx, dy = 3, 0 #
rotation = 0 #
mino = randint(1, 7) #
next_mino = randint(1, 7) #
hold_mino = -1 #
framerate = 30
score = 0
combo_count = 0
level = 1
goal = level * 5
bottom_count = 0 #
hard_drop = False #
name_location = 0
name = [65, 65, 65]
matrix = [[0 for y in range(height + 1)] for x in range(width)]
hold_mino_2P = -1 #
bottom_count_2P = 0 #
hard_drop_2P = False #
hold_2P = False #
next_mino_2P = randint(1,7) #
mino_2P = randint(1,7) #
rotation_2P = 0 #
dx_2P , dy_2P = 3, 0 #
matrix_2P = [[0 for y in range(height + 1)] for x in range(width)] # Board matrix
                    with open('leaderboard.txt') as f:
                        lines = [line.rstrip('\n') for line in f]
leaders = {'AAA': 0, 'BBB': 0, 'CCC': 0}
for i in lines:
leaders[i.split(' ')[0]] = int(i.split(' ')[1])
leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True)
pygame.time.set_timer(pygame.USEREVENT, 1)
if menu_button.isOver(pos):
ui_variables.click_sound.play()
start = False
pvp = False
game_over = False
hold = False
dx, dy = 3, 0
rotation = 0
mino = randint(1, 7)
next_mino = randint(1, 7)
hold_mino = -1
framerate = 30
score = 0
combo_count = 0
level = 1
goal = level * 5
bottom_count = 0
hard_drop = False
name_location = 0
name = [65, 65, 65]
matrix = [[0 for y in range(height + 1)] for x in range(width)]
if restart_button.isOver(pos):
if game_status == 'start':
start = True
pygame.mixer.music.play(-1)
if game_status == 'pvp':
pvp = True
pygame.mixer.music.play(-1)
ui_variables.click_sound.play()
game_over = False
hold = False
dx, dy = 3, 0
rotation = 0
mino = randint(1, 7)
next_mino = randint(1, 7)
hold_mino = -1
framerate = 30
score = 0
combo_count = 0
level = 1
goal = level * 5
bottom_count = 0
hard_drop = False
name_location = 0
name = [65, 65, 65]
matrix = [[0 for y in range(height + 1)] for x in range(width)]
pause = False
if resume_button.isOver(pos):
pause = False
ui_variables.click_sound.play()
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
# Start screen
else:
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
elif event.type == KEYDOWN:
if event.key == K_SPACE:
ui_variables.click_sound.play()
start = True
elif event.type == pygame.MOUSEMOTION:
if single_button.isOver(pos):
single_button.image = clicked_single_button_image
else :
single_button.image = single_button_image
if pvp_button.isOver(pos):
pvp_button.image = clicked_pvp_button_image
else :
pvp_button.image = pvp_button_image
if help_button.isOver(pos):
help_button.image = clicked_help_button_image
else :
help_button.image = help_button_image
if quit_button.isOver(pos):
quit_button.image = clicked_quit_button_image
else :
quit_button.image = quit_button_image
if setting_icon.isOver(pos):
setting_icon.image = clicked_setting_vector
else :
setting_icon.image = setting_vector
if leaderboard_icon.isOver(pos):
leaderboard_icon.image = clicked_leaderboard_vector
else :
leaderboard_icon.image = leaderboard_vector
elif event.type == pygame.MOUSEBUTTONDOWN:
if single_button.isOver(pos):
ui_variables.click_sound.play()
previous_time = pygame.time.get_ticks()
start = True
pygame.mixer.music.play(-1)
if pvp_button.isOver(pos):
ui_variables.click_sound.play()
pvp = True
pygame.mixer.music.play(-1)
if leaderboard_icon.isOver(pos):
ui_variables.click_sound.play()
leader_board = True
if setting_icon.isOver(pos):
ui_variables.click_sound.play()
setting = True
if quit_button.isOver(pos):
ui_variables.click_sound.play()
done = True
if help_button.isOver(pos):
ui_variables.click_sound.play()
help = True
elif event.type == VIDEORESIZE:
board_width = event.w
board_height = event.h
block_size = int(board_height*0.045)
screen = pygame.display.set_mode((board_width, board_height),pygame.RESIZABLE)
single_button = button(board_width*0.78, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,single_button_image)
pvp_button = button(board_width*0.78, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),2,pvp_button_image)
help_button = button(board_width*0.78, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),3,help_button_image)
quit_button = button(board_width*0.78, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),4,quit_button_image)
setting_icon = button(board_width*0.1, board_height*0.85,int(board_height*0.23), int(board_height*0.23),5,setting_vector)
leaderboard_icon = button(board_width*0.1, board_height*0.6,int(board_height*0.23), int(board_height*0.23),6,leaderboard_vector)
resume_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,resume_button_image)
restart_button = button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,restart_button_image)
setting_button = button(board_width*0.5, board_height*0.63,int(board_width*0.3734), int(board_height*0.1777),1,setting_button_image)
pause_quit_button= button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
back_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,back_button_image)
volume_icon = button(board_width*0.25, board_height*0.3,int(board_height*0.23), int(board_height*0.23),5,volume_vector)
screen_icon = button(board_width*0.45, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,screen_vector)
keyboard_icon = button(board_width*0.65, board_height*0.3,int(board_height*0.23), int(board_height*0.23),6,keyboard_vector)
ok_button = button(board_width*0.5, board_height*0.83,int(board_width*0.3734), int(board_height*0.1777),1,ok_button_image)
menu_button = button(board_width*0.5, board_height*0.23,int(board_width*0.3734), int(board_height*0.1777),1,menu_button_image)
gameover_quit_button= button(board_width*0.5, board_height*0.43,int(board_width*0.3734), int(board_height*0.1777),1,quit_button_image)
screen.fill(ui_variables.white)
draw_image(screen,background_image,board_width*0.5,board_height*0.5,board_width,board_height)
single_button.draw(screen,(0,0,0))
pvp_button.draw(screen,(0,0,0))
help_button.draw(screen,(0,0,0))
quit_button.draw(screen,(0,0,0))
setting_icon.draw(screen,(0,0,0))
leaderboard_icon.draw(screen,(0,0,0))
if not start:
pygame.display.update()
clock.tick(3)
pygame.quit()
|
py | b40fdea157fe74da15e43b3fff37d13397ed59fd | from datetime import datetime, timezone
import re
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Button, Div, Field, Layout, Submit
from django import forms
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.dispatch import receiver
from django.forms import CheckboxSelectMultiple, SelectMultiple, TextInput
from django_comments.models import Comment
from django_countries import Countries
from django_countries.fields import CountryField
from markdownx.fields import MarkdownxFormField
from dashboard.models import Continent
# this is used instead of Django Autocomplete Light widgets
# see issue #1330: https://github.com/swcarpentry/amy/issues/1330
from workshops.fields import (
ModelSelect2MultipleWidget,
ModelSelect2Widget,
RadioSelectWithOther,
Select2MultipleWidget,
Select2TagWidget,
Select2Widget,
)
from workshops.models import (
Airport,
Award,
Badge,
Event,
GenderMixin,
KnowledgeDomain,
Language,
Lesson,
Membership,
Organization,
Person,
Tag,
Task,
)
from workshops.signals import create_comment_signal
# this makes it possible for Select2 autocomplete widget to fit in low-width sidebar
SELECT2_SIDEBAR = {
"data-width": "100%",
"width": "style",
}
class BootstrapHelper(FormHelper):
"""Layout and behavior for crispy-displayed forms."""
html5_required = True
form_id = "main-form"
def __init__(
self,
form=None,
duplicate_buttons_on_top=False,
submit_label="Submit",
submit_name="submit",
submit_onclick=None,
use_get_method=False,
wider_labels=False,
add_submit_button=True,
add_delete_button=False,
add_cancel_button=True,
additional_form_class="",
form_tag=True,
display_labels=True,
form_action=None,
form_id=None,
include_media=True,
):
"""
`duplicate_buttons_on_top` -- Whether submit buttons should be
displayed on both top and bottom of the form.
`use_get_method` -- Force form to use GET instead of default POST.
`wider_labels` -- SWCEventRequestForm and DCEventRequestForm have
long labels, so this flag (set to True) is used to address that issue.
`add_delete_button` -- displays additional red "delete" button.
If you want to use it, you need to include in your template the
following code:
<form action="delete?next={{ request.GET.next|urlencode }}" method="POST"
id="delete-form">
{% csrf_token %}
</form>
This is necessary, because delete button must be reassigned from the
form using this helper to "delete-form". This reassignment is done
via HTML5 "form" attribute on the "delete" button.
`display_labels` -- Set to False, when your form has only submit
buttons and you want these buttons to be aligned to left.
"""
super().__init__(form)
self.attrs["role"] = "form"
self.duplicate_buttons_on_top = duplicate_buttons_on_top
self.submit_label = submit_label
if use_get_method:
self.form_method = "get"
if wider_labels:
assert display_labels
self.label_class = "col-12 col-lg-3"
self.field_class = "col-12 col-lg-9"
elif display_labels:
self.label_class = "col-12 col-lg-2"
self.field_class = "col-12 col-lg-10"
else:
self.label_class = ""
self.field_class = "col-lg-12"
if add_submit_button:
self.add_input(
Submit(
submit_name,
submit_label,
onclick=submit_onclick,
)
)
if add_delete_button:
self.add_input(
Submit(
"delete",
"Delete",
onclick="return " 'confirm("Are you sure you want to delete it?");',
form="delete-form",
css_class="btn-danger float-right",
)
)
if add_cancel_button:
self.add_input(
Button(
"cancel",
"Cancel",
css_class="btn-secondary float-right",
onclick="window.history.back()",
)
)
# offset here adds horizontal centering for all these forms
self.form_class = "form-horizontal " + additional_form_class
self.form_tag = form_tag
if form_action is not None:
self.form_action = form_action
if form_id is not None:
self.form_id = form_id
# don't prevent from loading media by default
self.include_media = include_media
def hr(self):
"""Horizontal line as a separator in forms is used very often. But
since from time to time the forms are changed (in terms of columns
width), we should rather use one global <hr>..."""
return '<hr class="col-12 mx-0 px-0">'
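# Usage note (sketch): forms in this module typically attach the helper as a class
# attribute, e.g. `helper = BootstrapHelper(add_cancel_button=False)`, or build a
# FormHelper in __init__ when the layout depends on runtime arguments
# (see WorkshopStaffForm below).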
class BootstrapHelperFilter(FormHelper):
"""A differently shaped forms (more space-efficient) for use in sidebar as
filter forms."""
form_method = "get"
form_id = "filter-form"
def __init__(self, form=None):
super().__init__(form)
self.attrs["role"] = "form"
self.inputs.append(Submit("", "Submit"))
class BootstrapHelperFormsetInline(BootstrapHelper):
"""For use in inline formsets."""
template = "bootstrap/table_inline_formset.html"
bootstrap_helper_filter = BootstrapHelperFilter()
bootstrap_helper_inline_formsets = BootstrapHelperFormsetInline()
# ----------------------------------------------------------
# MixIns
class PrivacyConsentMixin(forms.Form):
privacy_consent = forms.BooleanField(
label="*I have read and agree to <a href="
'"https://docs.carpentries.org/topic_folders/policies/privacy.html"'
' target="_blank" rel="noreferrer">'
"the data privacy policy of The Carpentries</a>.",
required=True,
)
class WidgetOverrideMixin:
def __init__(self, *args, **kwargs):
widgets = kwargs.pop("widgets", {})
super().__init__(*args, **kwargs)
for field, widget in widgets.items():
self.fields[field].widget = widget
# ----------------------------------------------------------
# Forms
def continent_list():
"""This has to be as a callable, because otherwise Django evaluates this
query and, if the database doesn't exist yet (e.g. during Travis-CI
tests)."""
return [("", "")] + list(Continent.objects.values_list("pk", "name"))
class WorkshopStaffForm(forms.Form):
"""Represent instructor matching form."""
latitude = forms.FloatField(
label="Latitude", min_value=-90.0, max_value=90.0, required=False
)
longitude = forms.FloatField(
label="Longitude", min_value=-180.0, max_value=180.0, required=False
)
airport = forms.ModelChoiceField(
label="Airport",
required=False,
queryset=Airport.objects.all(),
widget=ModelSelect2Widget(data_view="airport-lookup", attrs=SELECT2_SIDEBAR),
)
languages = forms.ModelMultipleChoiceField(
label="Languages",
required=False,
queryset=Language.objects.all(),
widget=ModelSelect2MultipleWidget(
data_view="language-lookup",
attrs=SELECT2_SIDEBAR,
),
)
domains = forms.ModelMultipleChoiceField(
label="Knowlege Domains",
required=False,
queryset=KnowledgeDomain.objects.all(),
widget=ModelSelect2MultipleWidget(
data_view="knowledge-domains-lookup",
attrs=SELECT2_SIDEBAR,
),
)
country = forms.MultipleChoiceField(
choices=list(Countries()),
required=False,
widget=Select2MultipleWidget,
)
continent = forms.ChoiceField(
choices=continent_list,
required=False,
widget=Select2Widget,
)
lessons = forms.ModelMultipleChoiceField(
queryset=Lesson.objects.all(),
widget=SelectMultiple(),
required=False,
)
badges = forms.ModelMultipleChoiceField(
queryset=Badge.objects.instructor_badges(),
widget=CheckboxSelectMultiple(),
required=False,
)
is_trainer = forms.BooleanField(required=False, label="Has Trainer badge")
GENDER_CHOICES = ((None, "---------"),) + Person.GENDER_CHOICES
gender = forms.ChoiceField(choices=GENDER_CHOICES, required=False)
was_helper = forms.BooleanField(
required=False, label="Was helper at least once before"
)
was_organizer = forms.BooleanField(
required=False, label="Was organizer at least once before"
)
is_in_progress_trainee = forms.BooleanField(
required=False, label="Is an in-progress instructor trainee"
)
def __init__(self, *args, **kwargs):
"""Build form layout dynamically."""
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_method = "get"
self.helper.layout = Layout(
Div(
Div(
HTML('<h5 class="card-title">Location</h5>'),
"airport",
HTML("<hr>"),
"country",
HTML("<hr>"),
"continent",
HTML("<hr>"),
"latitude",
"longitude",
css_class="card-body",
),
css_class="card",
),
"badges",
"is_trainer",
HTML("<hr>"),
"was_helper",
"was_organizer",
"is_in_progress_trainee",
"languages",
"domains",
"gender",
"lessons",
Submit("", "Submit"),
)
def clean(self):
cleaned_data = super().clean()
lat = bool(cleaned_data.get("latitude"))
lng = bool(cleaned_data.get("longitude"))
airport = bool(cleaned_data.get("airport"))
country = bool(cleaned_data.get("country"))
latlng = lat and lng
# if searching by coordinates, then there must be both lat & lng
# present
if lat ^ lng:
raise ValidationError(
"Must specify both latitude and longitude if searching by "
"coordinates"
)
# User must search by airport, or country, or coordinates, or none
# of them. The sum of the boolean elements must equal 0 (if general search)
# or 1 (if searching by airport OR country OR lat/lng).
if sum([airport, country, latlng]) not in [0, 1]:
raise ValidationError(
"Must specify an airport OR a country, OR use coordinates, OR "
"none of them."
)
return cleaned_data
class BulkUploadCSVForm(forms.Form):
"""This form allows to upload a single file; it's used by person bulk
upload and training request manual score bulk upload."""
file = forms.FileField()
class EventForm(forms.ModelForm):
administrator = forms.ModelChoiceField(
label="Administrator",
required=True,
help_text=Event._meta.get_field("administrator").help_text,
queryset=Organization.objects.administrators(),
widget=ModelSelect2Widget(data_view="administrator-org-lookup"),
)
assigned_to = forms.ModelChoiceField(
label="Assigned to",
required=False,
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="admin-lookup"),
)
language = forms.ModelChoiceField(
label="Language",
required=False,
queryset=Language.objects.all(),
widget=ModelSelect2Widget(data_view="language-lookup"),
)
country = CountryField().formfield(
required=False,
help_text=Event._meta.get_field("country").help_text,
widget=Select2Widget,
)
comment = MarkdownxFormField(
label="Comment",
help_text="Any content in here will be added to comments after this "
"event is saved.",
widget=forms.Textarea,
required=False,
)
helper = BootstrapHelper(add_cancel_button=False, duplicate_buttons_on_top=True)
class Meta:
model = Event
fields = [
"slug",
"completed",
"start",
"end",
"host",
"sponsor",
"membership",
"administrator",
"assigned_to",
"tags",
"url",
"language",
"reg_key",
"venue",
"manual_attendance",
"contact",
"country",
"address",
"latitude",
"longitude",
"open_TTT_applications",
"curricula",
"lessons",
"public_status",
"instructors_pre",
"instructors_post",
"comment",
]
widgets = {
"host": ModelSelect2Widget(data_view="organization-lookup"),
"sponsor": ModelSelect2Widget(data_view="organization-lookup"),
"membership": ModelSelect2Widget(data_view="membership-lookup"),
"manual_attendance": TextInput,
"latitude": TextInput,
"longitude": TextInput,
"tags": SelectMultiple(attrs={"size": Tag.ITEMS_VISIBLE_IN_SELECT_WIDGET}),
"curricula": CheckboxSelectMultiple(),
"lessons": CheckboxSelectMultiple(),
"contact": Select2TagWidget,
}
class Media:
js = (
"date_yyyymmdd.js",
"edit_from_url.js",
"online_country.js",
)
def __init__(self, *args, **kwargs):
show_lessons = kwargs.pop("show_lessons", False)
add_comment = kwargs.pop("add_comment", True)
super().__init__(*args, **kwargs)
self.helper.layout = Layout(
Field("slug", placeholder="YYYY-MM-DD-location"),
"completed",
Field("start", placeholder="YYYY-MM-DD"),
Field("end", placeholder="YYYY-MM-DD"),
"host",
"sponsor",
"membership",
"administrator",
"public_status",
"assigned_to",
"tags",
"open_TTT_applications",
"curricula",
"url",
"language",
"reg_key",
"manual_attendance",
"contact",
"instructors_pre",
"instructors_post",
Div(
Div(HTML("Location details"), css_class="card-header"),
Div(
"country",
"venue",
"address",
"latitude",
"longitude",
css_class="card-body",
),
css_class="card mb-2",
),
)
# if we want to show lessons, we need to alter existing layout
# otherwise we should remove the field so it doesn't break validation
if show_lessons:
self.helper.layout.insert(
# insert AFTER the curricula
self.helper.layout.fields.index("curricula") + 1,
"lessons",
)
else:
del self.fields["lessons"]
if add_comment:
self.helper.layout.append("comment")
else:
del self.fields["comment"]
def clean_slug(self):
# Ensure slug is in "YYYY-MM-DD-location" format
data = self.cleaned_data["slug"]
match = re.match(r"(\d{4}|x{4})-(\d{2}|x{2})-(\d{2}|x{2})-.+", data)
if not match:
raise ValidationError(
'Slug must be in "YYYY-MM-DD-location"'
' format, where "YYYY", "MM", "DD" can'
' be unspecified (ie. "xx").'
)
return data
def clean_end(self):
"""Ensure end >= start."""
start = self.cleaned_data["start"]
end = self.cleaned_data["end"]
if start and end and end < start:
raise ValidationError("Must not be earlier than start date.")
return end
def clean_open_TTT_applications(self):
"""Ensure there's a TTT tag applied to the event, if the
`open_TTT_applications` is True."""
open_TTT_applications = self.cleaned_data["open_TTT_applications"]
tags = self.cleaned_data.get("tags", None)
error_msg = "You cannot open applications on a non-TTT event."
if open_TTT_applications and tags:
# find TTT tag
TTT_tag = False
for tag in tags:
if tag.name == "TTT":
TTT_tag = True
break
if not TTT_tag:
raise ValidationError(error_msg)
elif open_TTT_applications:
raise ValidationError(error_msg)
return open_TTT_applications
def clean_curricula(self):
"""Validate tags when some curricula are selected."""
curricula = self.cleaned_data["curricula"]
tags = self.cleaned_data["tags"]
try:
expected_tags = []
for c in curricula:
if c.active and c.carpentry:
expected_tags.append(c.carpentry)
elif c.active and c.mix_match:
expected_tags.append("Circuits")
except (ValueError, TypeError):
expected_tags = []
for tag in expected_tags:
if not tags.filter(name=tag):
raise forms.ValidationError(
"You must add tags corresponding to these curricula."
)
return curricula
def clean_manual_attendance(self):
"""Regression: #1608 - fix 500 server error when field is cleared."""
manual_attendance = self.cleaned_data["manual_attendance"] or 0
return manual_attendance
def save(self, *args, **kwargs):
res = super().save(*args, **kwargs)
comment = self.cleaned_data.get("comment")
if comment:
create_comment_signal.send(
sender=self.__class__,
content_object=res,
comment=comment,
timestamp=None,
)
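# the create_comment_signal sent here is handled by form_saved_add_comment()
# at the bottom of this module, which stores the text as an automatic
# django_comments Comment attached to the saved event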
return res
class EventCreateForm(EventForm):
comment = MarkdownxFormField(
label="Comment",
help_text="This will be added to comments after the event is created.",
widget=forms.Textarea,
required=False,
)
class TaskForm(WidgetOverrideMixin, forms.ModelForm):
SEAT_MEMBERSHIP_HELP_TEXT = (
"{}<br><b>Hint:</b> you can use input format YYYY-MM-DD to display "
"memberships available on that date.".format(
Task._meta.get_field("seat_membership").help_text
)
)
seat_membership = forms.ModelChoiceField(
label=Task._meta.get_field("seat_membership").verbose_name,
help_text=SEAT_MEMBERSHIP_HELP_TEXT,
required=False,
queryset=Membership.objects.all(),
widget=ModelSelect2Widget(
data_view="membership-lookup",
attrs=SELECT2_SIDEBAR,
),
)
class Meta:
model = Task
fields = [
"event",
"person",
"role",
"title",
"url",
"seat_membership",
"seat_public",
"seat_open_training",
]
widgets = {
"person": ModelSelect2Widget(
data_view="person-lookup", attrs=SELECT2_SIDEBAR
),
"event": ModelSelect2Widget(
data_view="event-lookup", attrs=SELECT2_SIDEBAR
),
"seat_public": forms.RadioSelect(),
}
def __init__(self, *args, **kwargs):
form_tag = kwargs.pop("form_tag", True)
failed_trainings = kwargs.pop("failed_trainings", False)
super().__init__(*args, **kwargs)
bootstrap_kwargs = {
"add_cancel_button": False,
"form_tag": form_tag,
}
if failed_trainings:
bootstrap_kwargs["submit_onclick"] = (
'return confirm("Warning: Trainee failed previous training(s).'
' Are you sure you want to continue?");'
)
self.helper = BootstrapHelper(**bootstrap_kwargs)
class PersonForm(forms.ModelForm):
airport = forms.ModelChoiceField(
label="Airport",
required=False,
queryset=Airport.objects.all(),
widget=ModelSelect2Widget(data_view="airport-lookup"),
)
languages = forms.ModelMultipleChoiceField(
label="Languages",
required=False,
queryset=Language.objects.all(),
widget=ModelSelect2MultipleWidget(data_view="language-lookup"),
)
helper = BootstrapHelper(add_cancel_button=False, duplicate_buttons_on_top=True)
class Meta:
model = Person
# don't display the 'password', 'user_permissions',
# 'groups' or 'is_superuser' fields
# + reorder fields
fields = [
"username",
"personal",
"middle",
"family",
"email",
"secondary_email",
"gender",
"gender_other",
"country",
"airport",
"affiliation",
"github",
"twitter",
"url",
"occupation",
"orcid",
"user_notes",
"lessons",
"domains",
"languages",
]
widgets = {
"country": Select2Widget,
"gender": RadioSelectWithOther("gender_other"),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set up a layout object for the helper
self.helper.layout = self.helper.build_default_layout(self)
# set up `*WithOther` widgets so that they can display additional
# fields inline
self["gender"].field.widget.other_field = self["gender_other"]
# remove additional fields
self.helper.layout.fields.remove("gender_other")
def clean(self):
super().clean()
errors = dict()
# 1: require "other gender" field if "other" was selected in
# "gender" field
gender = self.cleaned_data.get("gender", "")
gender_other = self.cleaned_data.get("gender_other", "")
if gender == GenderMixin.OTHER and not gender_other:
errors["gender"] = ValidationError("This field is required.")
elif gender != GenderMixin.OTHER and gender_other:
errors["gender"] = ValidationError(
'If you entered data in the "Other" field, please select that option.'
)
# raise errors if any present
if errors:
raise ValidationError(errors)
class PersonCreateForm(PersonForm):
comment = MarkdownxFormField(
label="Comment",
help_text="This will be added to comments after the person is " "created.",
widget=forms.Textarea,
required=False,
)
class Meta(PersonForm.Meta):
# remove 'username' field as it's being populated after form save
# in the `views.PersonCreate.form_valid`
fields = PersonForm.Meta.fields.copy()
fields.remove("username")
fields.append("comment")
class PersonPermissionsForm(forms.ModelForm):
helper = BootstrapHelper(add_cancel_button=False)
user_permissions = forms.ModelMultipleChoiceField(
label=Person._meta.get_field("user_permissions").verbose_name,
help_text=Person._meta.get_field("user_permissions").help_text,
required=False,
queryset=Permission.objects.select_related("content_type"),
)
user_permissions.widget.attrs.update({"class": "resizable-vertical", "size": "20"})
class Meta:
model = Person
# only display administration-related fields: groups, permissions,
# being a superuser or being active (== ability to log in)
fields = [
"is_active",
"is_superuser",
"user_permissions",
"groups",
]
class PersonsSelectionForm(forms.Form):
person_a = forms.ModelChoiceField(
label="Person From",
required=True,
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="person-lookup"),
)
person_b = forms.ModelChoiceField(
label="Person To",
required=True,
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="person-lookup"),
)
helper = BootstrapHelper(use_get_method=True, add_cancel_button=False)
class PersonsMergeForm(forms.Form):
TWO = (
("obj_a", "Use A"),
("obj_b", "Use B"),
)
THREE = TWO + (("combine", "Combine"),)
DEFAULT = "obj_a"
person_a = forms.ModelChoiceField(
queryset=Person.objects.all(), widget=forms.HiddenInput
)
person_b = forms.ModelChoiceField(
queryset=Person.objects.all(), widget=forms.HiddenInput
)
id = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
username = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
personal = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
middle = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
family = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
email = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
secondary_email = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
gender = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
gender_other = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
airport = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
github = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
twitter = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
url = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
affiliation = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
occupation = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
orcid = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
award_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
qualification_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
label="Lessons",
)
domains = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
languages = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
task_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
is_active = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
trainingprogress_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
comment_comments = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
comments = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
consent_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
class AwardForm(WidgetOverrideMixin, forms.ModelForm):
class Meta:
model = Award
fields = "__all__"
widgets = {
"person": ModelSelect2Widget(
data_view="person-lookup", attrs=SELECT2_SIDEBAR
),
"event": ModelSelect2Widget(
data_view="event-lookup", attrs=SELECT2_SIDEBAR
),
"awarded_by": ModelSelect2Widget(
data_view="admin-lookup", attrs=SELECT2_SIDEBAR
),
}
def __init__(self, *args, **kwargs):
form_tag = kwargs.pop("form_tag", True)
failed_trainings = kwargs.pop("failed_trainings", False)
super().__init__(*args, **kwargs)
bootstrap_kwargs = {
"add_cancel_button": False,
"form_tag": form_tag,
}
if failed_trainings:
bootstrap_kwargs["submit_onclick"] = (
'return confirm("Warning: Trainee failed previous training(s).'
' Are you sure you want to continue?");'
)
self.helper = BootstrapHelper(**bootstrap_kwargs)
class EventLookupForm(forms.Form):
event = forms.ModelChoiceField(
label="Event",
required=True,
queryset=Event.objects.all(),
widget=ModelSelect2Widget(data_view="event-lookup"),
)
helper = BootstrapHelper(add_cancel_button=False)
class PersonLookupForm(forms.Form):
person = forms.ModelChoiceField(
label="Person",
required=True,
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="person-lookup"),
)
helper = BootstrapHelper(use_get_method=True, add_cancel_button=False)
class AdminLookupForm(forms.Form):
person = forms.ModelChoiceField(
label="Administrator",
required=True,
queryset=Person.objects.all(),
widget=ModelSelect2Widget(
data_view="admin-lookup",
attrs=SELECT2_SIDEBAR,
),
)
helper = BootstrapHelper(add_cancel_button=False)
class EventsSelectionForm(forms.Form):
event_a = forms.ModelChoiceField(
label="Event A",
required=True,
queryset=Event.objects.all(),
widget=ModelSelect2Widget(data_view="event-lookup"),
)
event_b = forms.ModelChoiceField(
label="Event B",
required=True,
queryset=Event.objects.all(),
widget=ModelSelect2Widget(data_view="event-lookup"),
)
helper = BootstrapHelper(use_get_method=True, add_cancel_button=False)
class EventsMergeForm(forms.Form):
TWO = (
("obj_a", "Use A"),
("obj_b", "Use B"),
)
THREE = TWO + (("combine", "Combine"),)
DEFAULT = "obj_a"
event_a = forms.ModelChoiceField(
queryset=Event.objects.all(), widget=forms.HiddenInput
)
event_b = forms.ModelChoiceField(
queryset=Event.objects.all(), widget=forms.HiddenInput
)
id = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
slug = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
completed = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
assigned_to = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
start = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
end = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
host = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
sponsor = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
administrator = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
public_status = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
tags = forms.ChoiceField(choices=THREE, initial=DEFAULT, widget=forms.RadioSelect)
url = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
language = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
reg_key = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
manual_attendance = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
contact = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
country = forms.ChoiceField(choices=TWO, initial=DEFAULT, widget=forms.RadioSelect)
venue = forms.ChoiceField(choices=THREE, initial=DEFAULT, widget=forms.RadioSelect)
address = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
latitude = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
longitude = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
learners_pre = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
learners_post = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
instructors_pre = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
instructors_post = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
learners_longterm = forms.ChoiceField(
choices=TWO,
initial=DEFAULT,
widget=forms.RadioSelect,
)
task_set = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
comments = forms.ChoiceField(
choices=THREE,
initial=DEFAULT,
widget=forms.RadioSelect,
)
# ----------------------------------------------------------
# Signals
@receiver(create_comment_signal, sender=EventForm)
@receiver(create_comment_signal, sender=EventCreateForm)
@receiver(create_comment_signal, sender=PersonCreateForm)
def form_saved_add_comment(sender, **kwargs):
"""A receiver for custom form.save() signal. This is intended to save
comment, entered as a form field, when creating a new object, and present
it as automatic system Comment (from django_comments app)."""
content_object = kwargs.get("content_object", None)
comment = kwargs.get("comment", None)
timestamp = kwargs.get("timestamp", datetime.now(timezone.utc))
# only proceed if we have an actual object (that exists in DB), and
# comment contents
if content_object and comment and content_object.pk:
site = Site.objects.get_current()
Comment.objects.create(
content_object=content_object,
site=site,
user=None,
user_name="Automatic comment",
submit_date=timestamp,
comment=comment,
)
|
py | b40fe0e07d493fc666651a56685419d85af253e8 | #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import os
import logging
import logging.config
import traceback
from bson import ObjectId
from arvet.config.global_configuration import load_global_config
from arvet.config.path_manager import PathManager
import arvet.database.connection as dbconn
import arvet.database.image_manager as im_manager
from arvet.batch_analysis.experiment import Experiment
def patch_cwd():
"""
Patch sys.path to make sure the current working directory is included.
This is necessary when this is being used as a library,
and we run the script by file path.
:return:
"""
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
def main(*args):
"""
Run analysis for a particular experiment
:args: Only argument is the id of the experiment to run
:return:
"""
if len(args) >= 1:
experiment_id = ObjectId(args[0])
# patch_cwd()
# Load the configuration
config = load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
# Configure the database and the image manager
dbconn.configure(config['database'])
im_manager.configure(config['image_manager'])
# Set up the path manager
path_manger = PathManager(paths=config['paths'], temp_folder=config['temp_folder'])
# Try and get the experiment object
try:
experiment = Experiment.objects.get({'_id': experiment_id})
except Exception as ex:
logging.getLogger(__name__).critical("Exception occurred while loading Experiment({0}):\n{1}".format(
str(experiment_id), traceback.format_exc()
))
raise ex
# Since we got the experiment, run the analysis
try:
experiment.perform_analysis()
except Exception as ex:
logging.getLogger(__name__).critical("Exception occurred while performing analysis {0}({1}):\n{2}".format(
type(experiment).__name__, str(experiment_id), traceback.format_exc()
))
raise ex
if __name__ == '__main__':
main(*sys.argv[1:])
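# Illustrative invocation (script file name is an assumption; the only
# argument is the experiment's ObjectId), e.g.:
#
#     python run_analysis.py 5f1d3c2b9a1e4b0012345678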
|
py | b40fe0e6890d3a43acf6d0bd0ecf4e32452d254c | class FakeOAIPMHData:
fake_xml_record = """<?xml version="1.0"?>
<arXiv xsi:schemaLocation="http://arxiv.org/OAI/arXiv/ http://arxiv.org/OAI/arXiv.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://arxiv.org/OAI/arXiv/">
<id>0704.0025</id>
<created>2007-04-02</created>
<authors>
<author>
<keyname>Mishchenko</keyname>
<forenames>A. S.</forenames>
<affiliation>CREST, Japan Science and Technology Agency</affiliation>
<affiliation>Russian Research Centre ``Kurchatov Institute''</affiliation>
</author>
<author>
<keyname>Nagaosa</keyname>
<forenames>N.</forenames>
<affiliation>CREST, Japan Science and Technology Agency</affiliation>
<affiliation>The University of Tokyo</affiliation>
</author>
</authors>
<title>Spectroscopic Properties of Polarons in Strongly Correlated Systems byExact Diagrammatic Monte Carlo Method</title>
<categories>cond-mat.str-el cond-mat.stat-mech</categories>
<comments>41 pages, 13 figures, in "Polarons in Advanced Materials" ed. A. S.Alexandrov (Canopus/Springer Publishing, Bristol (2007)), pp. 503-544.</comments>
<doi>10.1007/978-1-4020-6348-0_12</doi>
<abstract> We present recent advances in understanding of the ground and excited states of the electron-phonon coupled systems obtained by novel methods of Diagrammatic Monte Carlo and Stochastic Optimization, which enable the approximation-free calculation of Matsubara Green function in imaginary times and perform unbiased analytic continuation to real frequencies. We present exact numeric results on the ground state properties, Lehmann spectral function and optical conductivity of different strongly correlated systems: Frohlich polaron, Rashba-Pekar exciton-polaron, pseudo Jahn-Teller polaron, exciton, and interacting with phonons hole in the t-J model. </abstract>
</arXiv>
"""
fake_json_record = {'@xsi:schemaLocation': 'http://arxiv.org/OAI/arXiv/ http://arxiv.org/OAI/arXiv.xsd',
'@xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'@xmlns': 'http://arxiv.org/OAI/arXiv/',
'id': '0704.0025',
'created': '2007-04-02',
'authors': {'author':[
{'keyname': 'Mishchenko',
'forenames': 'A. S.',
'affiliation':['CREST, Japan Science and Technology Agency',
"Russian Research Centre ``Kurchatov Institute''"]},
{'keyname': 'Nagaosa',
'forenames': 'N.',
'affiliation':['CREST, Japan Science and Technology Agency',
'The University of Tokyo']}]},
'title': 'Spectroscopic Properties of Polarons in Strongly Correlated Systems byExact Diagrammatic Monte Carlo Method',
'categories': 'cond-mat.str-el cond-mat.stat-mech',
'comments': '41 pages, 13 figures, in "Polarons in Advanced Materials" ed. A. S.Alexandrov (Canopus/Springer Publishing, Bristol (2007)), pp. 503-544.',
'doi': '10.1007/978-1-4020-6348-0_12',
'abstract': 'We present recent advances in understanding of the ground and excited states of the electron-phonon coupled systems obtained by novel methods of Diagrammatic Monte Carlo and Stochastic Optimization, which enable the approximation-free calculation of Matsubara Green function in imaginary times and perform unbiased analytic continuation to real frequencies. We present exact numeric results on the ground state properties, Lehmann spectral function and optical conductivity of different strongly correlated systems: Frohlich polaron, Rashba-Pekar exciton-polaron, pseudo Jahn-Teller polaron, exciton, and interacting with phonons hole in the t-J model.'}
fake_clean_json_record = {'id': '0704.0025',
'created': '2007-04-02',
'updated': '2007-04-02',
'authors': "Author+1, Author+2",
'title': 'Spectroscopic Properties of Polarons in Strongly Correlated Systems byExact Diagrammatic Monte Carlo Method',
'doi': '10.1007/978-1-4020-6348-0_12',
'abstract': 'We present recent advances in understanding of the ground and excited states of the electron-phonon coupled systems obtained by novel methods of Diagrammatic Monte Carlo and Stochastic Optimization, which enable the approximation-free calculation of Matsubara Green function in imaginary times and perform unbiased analytic continuation to real frequencies. We present exact numeric results on the ground state properties, Lehmann spectral function and optical conductivity of different strongly correlated systems: Frohlich polaron, Rashba-Pekar exciton-polaron, pseudo Jahn-Teller polaron, exciton, and interacting with phonons hole in the t-J model.'}
fake_clean_record_long_authors = {'id': '0704.0025',
'created': '2007-04-02',
'updated': '2007-04-02',
'authors': "Author+1, Author+2, Author+3, Author+4, Author+5, Author+6, Author+7, Author+8, Author+9, Author+10, Author+11, Author+12, ",
'title': 'Spectroscopic Properties of Polarons in Strongly Correlated Systems byExact Diagrammatic Monte Carlo Method',
'doi': '10.1007/978-1-4020-6348-0_12',
'abstract': 'We present recent advances in understanding of the ground and excited states of the electron-phonon coupled systems obtained by novel methods of Diagrammatic Monte Carlo and Stochastic Optimization, which enable the approximation-free calculation of Matsubara Green function in imaginary times and perform unbiased analytic continuation to real frequencies. We present exact numeric results on the ground state properties, Lehmann spectral function and optical conductivity of different strongly correlated systems: Frohlich polaron, Rashba-Pekar exciton-polaron, pseudo Jahn-Teller polaron, exciton, and interacting with phonons hole in the t-J model.'}
fake_bad_xml_records = [None, """I am not an XML record.""", dict(), list()]
fake_bad_json_records = [None, """I am not an JSON record.""", dict(), list()]
fake_good_author_data = {'author':[{'keyname':'Einstein','forenames':'Albert','affiliation':'Princeton'},
{'keyname':'Curie','forenames':'Marie','affiliation':['1','2']},
{'keyname':'Ada Lovelace'}]}
fake_bad_author_datas = [[None], ["""I am not author data."""],[ dict()], [list()]]
fake_dirty_text = """
There should be no linebreaks\n
or multiple spaces in this sentence once processed.
""" |
py | b40fe1d8b170b7ba9d98891774511b8cef1a5152 | import matplotlib.pyplot as plt
with open('data/time.tsv', 'r') as f:
data = f.read()
lines = data.split('\n')
timing = [line.split('\t') for line in lines]
func_names = set(item[0] for item in timing if item[0] != "")
time_dict = {}
for name in func_names:
for t in timing:
if t[0] == name:
if name in time_dict.keys():
time_dict[name].append(float(t[1]))
else:
time_dict[name] = [float(t[1])]
for func in time_dict.keys():
plt.plot(
[1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000],
time_dict[func], label=func
)
plt.legend()
plt.ylim(top=20)
# save before show(), otherwise the figure may already be released and the saved file blank
plt.savefig('data/benchmark.png')
plt.show() |
py | b40fe2245dfb617f9c1dba39440e77cf06b01e07 | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
from anvil import HtmlPanel as _HtmlPanel
from anvil.js import get_dom_node as _get_dom_node
from ..Chip import Chip
from ..session import style_injector as _style_injector
from ..utils._component_helpers import _get_color, _spacing_property
from ._anvil_designer import ChipsInputTemplate
__version__ = "1.4.0"
_primary = _get_color(None)
_style_injector.inject(
"""
.anvil-extras-chips-input input {
box-shadow: none !important;
border: none !important;
padding: 7px 0 !important;
margin-bottom: 0 !important;
flex: 1;
min-width: 50px;
}
.anvil-extras-chips-input{
display: flex;
flex-wrap: wrap;
gap: 8px;
border-bottom: 1px solid;
align-items: center;
padding-bottom: 4px;
}
"""
)
_defaults = {
"primary_placeholder": "",
"secondary_placeholder": "",
"chips": [],
"visible": True,
"spacing_above": "small",
"spacing_below": "small",
}
class ChipsInput(ChipsInputTemplate):
def __init__(self, **properties):
self._chips = []
self._deleting = False
self._placeholder = self._placeholder_0 = self._placeholder_1 = ""
input_node = _get_dom_node(self.chip_input)
input_node.addEventListener("keydown", self._chip_input_key_down)
dom_node = self._dom_node = _get_dom_node(self)
dom_node.classList.add("anvil-extras-chips-input")
dom_node.querySelector(".chips-input-placeholder").remove()
dom_node.querySelector("script").remove()
self.temp_chip.remove_from_parent()
properties = _defaults | properties
self.init_components(**properties)
@property
def primary_placeholder(self):
return self._placeholder_0
@primary_placeholder.setter
def primary_placeholder(self, value):
self._placeholder_0 = value
if not len(self._chips):
self.chip_input.placeholder = value
self._placeholder = value
@property
def secondary_placeholder(self):
return self._placeholder_1
@secondary_placeholder.setter
def secondary_placeholder(self, value):
self._placeholder_1 = value
if len(self._chips):
self.chip_input.placeholder = value
self._placeholder = value
@property
def chips(self):
# make sure chips is immutable
return tuple(self._chips)
@chips.setter
def chips(self, value):
value = value or []
if list(value) == self._chips:
return
self._chips = []
self.clear(slot="chips")
seen = set()
for chip_text in value:
if chip_text in seen or not chip_text:
continue
chip = Chip(text=chip_text, spacing_above="none", spacing_below="none")
self.add_component(chip, slot="chips")
chip.set_event_handler("close_click", self._chip_close_click)
self._chips.append(chip_text)
seen.add(chip_text)
self._reset_placeholder()
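# Illustrative behaviour sketch (the variable name is hypothetical): the
# setter skips duplicates and empty strings, and the getter returns an
# immutable tuple, e.g.
#
#     chips_input.chips = ["alpha", "beta", "alpha", ""]
#     chips_input.chips   # -> ("alpha", "beta")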
visible = _HtmlPanel.visible
spacing_above = _spacing_property("above")
spacing_below = _spacing_property("below")
###### PRIVATE METHODS AND PROPS ######
@property
def _last_chip(self):
"""throws an error if we have no chips, when used must be wrapped in try/except"""
components = self.get_components()
components.remove(self.chip_input)
return components[-1]
def _reset_placeholder(self):
new_placeholder = self._placeholder_1 if self._chips else self._placeholder_0
if new_placeholder != self._placeholder:
self.chip_input.placeholder = self._placeholder = new_placeholder
def _reset_deleting(self, val):
try:
self._deleting = val
self._set_focus(self._last_chip, val)
except IndexError:
pass
def _chip_input_pressed_enter(self, **event_args):
"""This method is called when the user presses Enter in this text box"""
chip_text = self.chip_input.text
if chip_text and chip_text not in self._chips:
chip = Chip(text=chip_text, spacing_above="none", spacing_below="none")
self.add_component(chip, slot="chips")
chip.set_event_handler("close_click", self._chip_close_click)
self._chips.append(chip_text)
self.chip_input.text = ""
self._reset_placeholder()
self.raise_event("chips_changed")
self.raise_event("chip_added", chip=chip_text)
def _chip_input_key_down(self, js_event):
"""This method is called when on the user key down in this text box"""
try:
if not self.chip_input.text and js_event.key == "Backspace":
if not self._deleting:
self._reset_deleting(True)
return
_last_chip = self._last_chip
self._chips.pop()
chip_text = _last_chip.text
_last_chip.remove_from_parent()
self._reset_placeholder()
self.raise_event("chips_changed")
self.raise_event("chip_removed", chip=chip_text)
self._set_focus(self._last_chip, True)
elif self._deleting:
self._reset_deleting(False)
if js_event.key == "Tab":
js_event.preventDefault()
except IndexError:
pass
def _chip_input_focus(self, **event_args):
"""This method is called when the TextBox gets focus"""
self._dom_node.style.borderBottom = f"1px solid {_primary}"
def _chip_input_lost_focus(self, **event_args):
"""This method is called when the TextBox loses focus"""
self._dom_node.style.borderBottom = "1px solid"
self._reset_deleting(False)
def _chip_close_click(self, sender, **event_args):
chips = self._chips
chip_text = sender.text
chips.remove(chip_text)
sender.remove_from_parent()
self.raise_event("chips_changed")
self.raise_event("chip_removed", chip=chip_text)
@staticmethod
def _set_focus(chip, val):
chip.background = _primary if val else ""
chip.chip_label.foreground = "#fff" if val else ""
chip.close_link.foreground = "#fff" if val else ""
|
py | b40fe27ac5450f7d95be72d8e6a7668e2b112a5a | # coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Size(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'width': 'float',
'height': 'float'
}
attribute_map = {
'width': 'width',
'height': 'height'
}
def __init__(self, width=None, height=None): # noqa: E501
"""Size - a model defined in OpenAPI""" # noqa: E501
self._width = None
self._height = None
self.discriminator = None
self.width = width
self.height = height
@property
def width(self):
"""Gets the width of this Size. # noqa: E501
The width of this position # noqa: E501
:return: The width of this Size. # noqa: E501
:rtype: float
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this Size.
The width of this position # noqa: E501
:param width: The width of this Size. # noqa: E501
:type: float
"""
if width is None:
raise ValueError("Invalid value for `width`, must not be `None`") # noqa: E501
self._width = width
@property
def height(self):
"""Gets the height of this Size. # noqa: E501
The height of this position # noqa: E501
:return: The height of this Size. # noqa: E501
:rtype: float
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this Size.
The height of this position # noqa: E501
:param height: The height of this Size. # noqa: E501
:type: float
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`") # noqa: E501
self._height = height
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Size):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b40fe36088858448951dd1813b75e99580f17b2a | '''Use function via console.
## Command Line Usage
```shell
$ gua
Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/15.14986
$ gua -n chrome
Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3489.10 Safari/537.36
$ gua -o android
Mozilla/5.0 (Linux; Android 8.1; Huawei P20 Lite Build/OPR3.170623.008) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3297.48 Mobile Safari/537.36
$ gua -n safari -o ios
Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_3 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/9.0 Mobile/13G34 Safari/602.2
```
'''
from argparse import ArgumentParser
from .user_agent import generate_user_agent
def script_gua():
parser = ArgumentParser(
usage='%(prog)s [options] usage',
description='Generates User-Agent HTTP header',
)
parser.add_argument('-o', '--os',
help='limit list of os for generation, possible values:\
"win", "linux", "mac", "android", "ios", "all"')
parser.add_argument('-n', '--navigator',
help='limit list of browser engines for generation, possible values:\
"chrome", "firefox", "ie", "edge", "safari", "opera", "all"')
parser.add_argument('-d', '--device-type', help='possible values:\
"desktop", "smartphone", "all"')
opts = parser.parse_args()
gua = generate_user_agent(os=opts.os,
navigator=opts.navigator,
device_type=opts.device_type)
print(gua)
|
py | b40fe38681a269c74f31beecc1dd57d07f1ca42c | from sc2 import UnitTypeId, AbilityId
from sc2.ids.buff_id import BuffId
from sc2.unit import Unit
from sharpy.plans.acts.act_base import ActBase
class ChronoAnyTech(ActBase):
ENERGY_COST = 50
def __init__(self, save_to_energy: int):
assert save_to_energy is not None and isinstance(save_to_energy, int)
self.save_to_energy = save_to_energy
self.types = [UnitTypeId.FORGE, UnitTypeId.ROBOTICSBAY, UnitTypeId.TWILIGHTCOUNCIL, UnitTypeId.TEMPLARARCHIVE,
UnitTypeId.CYBERNETICSCORE, UnitTypeId.DARKSHRINE, UnitTypeId.FLEETBEACON]
super().__init__()
async def execute(self):
# if ai.already_pending_upgrade(self.name):
target: Unit
nexus: Unit
for target in self.cache.own(self.types).ready:
for order in target.orders:
# TODO: Chrono only up to 90% or 95% complete.
ability_id = order.ability.id
# boost here!
if not target.has_buff(BuffId.CHRONOBOOSTENERGYCOST):
for nexus in self.cache.own(UnitTypeId.NEXUS):
if nexus.energy > self.save_to_energy + ChronoAnyTech.ENERGY_COST:
self.do(nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, target))
self.print(f'Chrono {ability_id.name}')
return True # Never block and only boost one building per iteration
return True # Never block
|
py | b40fe3d8a9b0b14a1f6010bf37c5b33dadd4e173 | # -*- coding: utf-8 -*-
import argparse
import cv2
from matplotlib import pyplot as plt
import numpy as np
import os
from PIL import Image
from pprint import pprint
import sys
from lib.image_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.processing_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="media/*.jpg", help="Input file pattern; can be a single file or a glob string")
parser.add_argument('-out', dest="OUTPUT_DIR", default="output/segments/", help="Segment data output directory")
# parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing data?")
parser.add_argument('-blur', dest="BLUR_RADIUS", default=0.0, type=float, help="Guassian blur radius, e.g. 2.0")
parser.add_argument('-thresh', dest="THRESHOLD", default=0.99, type=float, help="Only include segments with at least this score")
parser.add_argument('-validate', dest="VALIDATE", action="store_true", help="Validate images?")
parser.add_argument('-debug', dest="DEBUG", action="store_true", help="Display plot of first result?")
a = parser.parse_args()
OUTPUT_FILE = a.OUTPUT_DIR + "segments.csv"
filenames = getFilenames(a.INPUT_FILE)
filecount = len(filenames)
if a.VALIDATE:
filenames = validateImages(filenames)
# Make sure output dirs exist
makeDirectories(a.OUTPUT_DIR)
def imageToSegment(filename, outFilename):
global a
# Read image, convert to grayscale, do threshold
im_in = cv2.imread(filename)
gray = cv2.cvtColor(im_in, cv2.COLOR_BGR2GRAY)
th, im_th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used to flood filling.
# Notice the mask size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv
# Display images.
if a.DEBUG:
# cv2.imshow("Original Image", im_in)
# cv2.imshow("Thresholded Image", im_th)
# cv2.imshow("Floodfilled Image", im_floodfill)
# cv2.imshow("Inverted Floodfilled Image", im_floodfill_inv)
cv2.imshow("Foreground", im_out)
cv2.waitKey(0)
# now try to get the largest segment
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im_out, connectivity=4)
sizes = stats[:, -1]
max_label = 1
max_size = sizes[1]
for i in range(1, nb_components):
if sizes[i] > max_size:
max_label = i
max_size = sizes[i]
maskWithLargestSegment = np.zeros(output.shape)
maskWithLargestSegment[output == max_label] = 255
if a.DEBUG:
cv2.imshow("Biggest component", maskWithLargestSegment)
cv2.waitKey(0)
# get bounding box
width = stats[max_label, cv2.CC_STAT_WIDTH]
height = stats[max_label, cv2.CC_STAT_HEIGHT]
x = stats[max_label, cv2.CC_STAT_LEFT]
y = stats[max_label, cv2.CC_STAT_TOP]
imageMaskWithLargestSegment = Image.fromarray(maskWithLargestSegment)
imageMaskWithLargestSegment = imageMaskWithLargestSegment.convert("L")
imageMaskWithLargestSegment = imageMaskWithLargestSegment.crop((x, y, x+width, y+height))
srcImage = Image.open(filename)
srcImage = srcImage.convert("RGBA")
srcImage = srcImage.crop((x, y, x+width, y+height))
segmentOut = alphaMask(srcImage, imageMaskWithLargestSegment)
segmentOut.save(outFilename)
print("Saved %s" % outFilename)
return (x, y, width, height)
# imageToSegment("E:/production/papercuts/downloads/fish/6006022416.jpg", "E:/production/papercuts/segments/fish/6006022416.png")
# sys.exit()
segmentRows = []
for i, fn in enumerate(filenames):
segmentFilename = getBasename(fn) + ".png"
segmentFilepath = a.OUTPUT_DIR + segmentFilename
x, y, w, h = imageToSegment(fn, segmentFilepath)
printProgress(i+1, filecount)
segmentRows.append({
"sourceFilename": os.path.basename(fn),
"filename": segmentFilename,
"x": x,
"y": y,
"width": w,
"height": h
})
if a.DEBUG:
break
if a.DEBUG:
sys.exit()
writeCsv(OUTPUT_FILE, segmentRows)
|
py | b40fe4b190cefd3085a390691f53511320b9ab53 | #! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Character and character-sequence data structures.
"""
import warnings
import copy
import collections
from dendropy.utility.textprocessing import StringIO
from dendropy.utility import textprocessing
from dendropy.utility import error
from dendropy.utility import deprecate
from dendropy.utility import container
from dendropy.datamodel import charstatemodel
from dendropy.datamodel.charstatemodel import DNA_STATE_ALPHABET
from dendropy.datamodel.charstatemodel import RNA_STATE_ALPHABET
from dendropy.datamodel.charstatemodel import NUCLEOTIDE_STATE_ALPHABET
from dendropy.datamodel.charstatemodel import PROTEIN_STATE_ALPHABET
from dendropy.datamodel.charstatemodel import RESTRICTION_SITES_STATE_ALPHABET
from dendropy.datamodel.charstatemodel import INFINITE_SITES_STATE_ALPHABET
from dendropy.datamodel import basemodel
from dendropy.datamodel import taxonmodel
from dendropy import dataio
###############################################################################
## ContinuousCharElement
class ContinuousCharElement(
basemodel.DataObject,
basemodel.Annotable):
def __init__(self, value, column_def=None, label=None):
basemodel.DataObject.__init__(self,
label=label)
self.value = value
self.column_def = column_def
###############################################################################
## CharacterType
class CharacterType(
basemodel.DataObject,
basemodel.Annotable):
"""
A character format or type of a particular column: i.e., maps a particular
set of character state definitions to a column in a character matrix.
"""
def __init__(self,
label=None,
state_alphabet=None):
basemodel.DataObject.__init__(self, label=label)
self._state_alphabet = None
self.state_alphabet = state_alphabet
def _get_state_alphabet(self):
"""
The |StateAlphabet| representing the state alphabet for this
column: i.e., the collection of symbols and the state identities to
which they map.
"""
return self._state_alphabet
def _set_state_alphabet(self, value):
self._state_alphabet = value
state_alphabet = property(_get_state_alphabet, _set_state_alphabet)
def __copy__(self, memo=None):
raise TypeError("Cannot directly copy {}".format(self.__class__.__name__))
def taxon_namespace_scoped_copy(self, memo=None):
raise TypeError("Cannot directly copy {}".format(self.__class__.__name__))
def __deepcopy__(self, memo=None):
return basemodel.Annotable.__deepcopy__(self, memo=memo)
###############################################################################
## CharacterDataSequence
class CharacterDataSequence(
basemodel.Annotable,
):
"""
A sequence of character values for a particular taxon or entry in
a data matrix.
Objects of this class can be (almost) treated as simple lists, where the
elements are the values of characters (typically, real values in the case
of continuous data, and special instances of |StateIdentity| objects in the
case of discrete data.
Character type data (represented by |CharacterType| instances) and metadata
annotations (represented by |AnnotationSet| instances), if any, are
maintained in a parallel list that need to be accessed separately using the
index of the value to which the data correspond. So, for example, the
|AnnotationSet| object containing the metadata annotations for the first
value in a sequence, ``s[0]``, is available through
``s.annotations_at(0)``, while the character type information for that
first element is available through ``s.character_type_at(0)`` and can be
set through ``s.set_character_type_at(0, c)``.
In most cases where metadata annotations and character type information are
not needed, treating objects of this class as a simple list provides all
the functionality needed. Where metadata annotations or character type
information are required, all the standard list mutation methods (e.g.,
``CharacterDataSequence.insert``, ``CharacterDataSequence.append``,
``CharacterDataSequence.extend``) also take optional ``character_type``
and ``character_annotations`` argument in addition to the primary
``character_value`` argument, thus allowing for setting of the value,
character type, and annotation set simultaneously. While iteration over
character values are available through the standard list iteration
interface, the method ``CharacterDataSequence.cell_iter()`` provides for
iterating over ``<character-value, character-type,
character-annotation-set>`` triplets.
"""
###############################################################################
## Life-cycle
def __init__(self,
character_values=None,
character_types=None,
character_annotations=None):
"""
Parameters
----------
character_values : iterable of values
A set of values for this sequence.
"""
self._character_values = []
self._character_types = []
self._character_annotations = []
if character_values:
self.extend(
character_values=character_values,
character_types=character_types,
character_annotations=character_annotations)
###############################################################################
## Life-cycle
# def values(self):
# return list(self._character_values)
def values(self):
"""
Returns list of values of this vector.
Returns
-------
v : list
List of values making up this vector.
"""
return self._character_values
def symbols_as_list(self):
"""
Returns list of string representation of values of this vector.
Returns
-------
v : list
List of string representation of values making up this vector.
"""
return list(str(cs) for cs in self._character_values)
def symbols_as_string(self, sep=""):
"""
Returns values of this vector as a single string, with individual value
elements separated by ``sep``.
Returns
-------
s : string
String representation of values making up this vector.
"""
return sep.join(str(cs) for cs in self._character_values)
def __str__(self):
return self.symbols_as_string()
def append(self, character_value, character_type=None, character_annotations=None):
"""
Adds a value to ``self``.
Parameters
----------
character_value : object
Value to be stored.
character_type : |CharacterType|
Description of character value.
character_annotations : |AnnotationSet|
Metadata annotations associated with this character.
"""
self._character_values.append(character_value)
self._character_types.append(character_type)
self._character_annotations.append(character_annotations)
def extend(self, character_values, character_types=None, character_annotations=None):
"""
Extends ``self`` with values.
Parameters
----------
character_values : iterable of objects
Values to be stored.
character_types : iterable of |CharacterType| objects
Descriptions of character values.
character_annotations : iterable |AnnotationSet| objects
Metadata annotations associated with characters.
"""
self._character_values.extend(character_values)
if character_types is None:
self._character_types.extend( [None] * len(character_values) )
else:
assert len(character_types) == len(character_values)
self._character_types.extend(character_types)
if character_annotations is None:
self._character_annotations.extend( [None] * len(character_values) )
else:
assert len(character_annotations) == len(character_values)
self._character_annotations.extend(character_annotations)
def __len__(self):
return len(self._character_values)
def __getitem__(self, idx):
return self._character_values[idx]
def __setitem__(self, idx, value):
self._character_values[idx] = value
def __iter__(self):
return self.__next__()
def __next__(self):
for v in self._character_values:
yield v
next = __next__ # Python 2 legacy support
def cell_iter(self):
"""
Iterate over triplets of character values and associated
|CharacterType| and |AnnotationSet| instances.
"""
for v, t, a in zip(self._character_values, self._character_types, self._character_annotations):
yield v, t, a
def __delitem__(self, idx):
del self._character_values[idx]
del self._character_types[idx]
del self._character_annotations[idx]
def set_at(self, idx, character_value, character_type=None, character_annotations=None):
"""
Set value and associated character type and metadata annotations for
element at ``idx``.
Parameters
----------
idx : integer
Index of element to set.
character_value : object
Value to be stored.
character_type : |CharacterType|
Description of character value.
character_annotations : |AnnotationSet|
Metadata annotations associated with this character.
"""
to_add = (idx+1) - len(self._character_values)
while to_add > 0:
self.append(None)
to_add -= 1
self._character_values[idx] = character_value
self._character_types[idx] = character_type
self._character_annotations[idx] = character_annotations
def insert(self, idx, character_value, character_type=None, character_annotations=None):
"""
Insert value and associated character type and metadata annotations for
element at ``idx``.
Parameters
----------
idx : integer
Index of element to set.
character_value : object
Value to be stored.
character_type : |CharacterType|
Description of character value.
character_annotations : |AnnotationSet|
Metadata annotations associated with this character.
"""
self._character_values.insert(idx, character_value)
self._character_types.insert(idx, character_type)
self._character_annotations.insert(idx, character_annotations)
def value_at(self, idx):
"""
Return value of character at ``idx``.
Parameters
----------
idx : integer
Index of element value to return.
Returns
-------
c : object
Value of character at index ``idx``.
"""
return self._character_values[idx]
def character_type_at(self, idx):
"""
Return type of character at ``idx``.
Parameters
----------
idx : integer
Index of element character type to return.
Returns
-------
c : |CharacterType|
|CharacterType| associated with character index ``idx``.
"""
return self._character_types[idx]
def annotations_at(self, idx):
"""
Return metadata annotations of character at ``idx``.
Parameters
----------
idx : integer
Index of element annotations to return.
Returns
-------
c : |AnnotationSet|
|AnnotationSet| representing metadata annotations of character at index ``idx``.
"""
if self._character_annotations[idx] is None:
self._character_annotations[idx] = basemodel.AnnotationSet()
return self._character_annotations[idx]
def has_annotations_at(self, idx):
"""
Return |True| if character at ``idx`` has metadata annotations.
Parameters
----------
idx : integer
Index of element annotations to check.
Returns
-------
b : bool
|True| if character at ``idx`` has metadata annotations, |False|
otherwise.
"""
return not self._character_annotations[idx] is None
def set_character_type_at(self, idx, character_type):
"""
Set type of character at ``idx``.
Parameters
----------
idx : integer
Index of element character type to set.
"""
self._character_types[idx] = character_type
def set_annotations_at(self, idx, annotations):
"""
Set metadata annotations of character at ``idx``.
Parameters
----------
idx : integer
Index of element annotations to set.
"""
self._character_annotations[idx] = annotations
###############################################################################
## Subset of Character (Columns)
class CharacterSubset(
basemodel.DataObject,
basemodel.Annotable,
):
"""
Tracks definition of a subset of characters.
"""
def __init__(self, label=None, character_indices=None):
"""
Parameters
----------
label: str
Name of this subset.
character_indices: iterable of ``int``
Iterable of 0-based (integer) indices of column positions that
constitute this subset.
"""
basemodel.DataObject.__init__(self, label=label)
if character_indices is None:
self.character_indices = set()
else:
self.character_indices = set(character_indices)
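# Illustrative example (label and indices are assumptions): a subset covering
# every third column of a 300-column matrix, e.g. first codon positions:
#
#     codon1 = CharacterSubset(label="codon-1", character_indices=range(0, 300, 3))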
def __len__(self):
return len(self.character_indices)
def __iter__(self):
return iter(self.character_indices)
def __deepcopy__(self, memo):
return basemodel.Annotable.__deepcopy__(self, memo=memo)
###############################################################################
## CharacterMatrix
class CharacterMatrix(
taxonmodel.TaxonNamespaceAssociated,
basemodel.Annotable,
basemodel.Deserializable,
basemodel.NonMultiReadable,
basemodel.Serializable,
basemodel.DataObject):
"""
A data structure that manages the association of operational taxonomic unit
concepts to sequences of character state identities or values.
This is a base class that provides general functionality; derived classes
specialize for particular data types. You will not be using the class
directly, but rather one of the derived classes below, specialized for data
types such as DNA, RNA, continuous, etc.
This class and derived classes behave like a dictionary where the keys are
|Taxon| objects and the values are `CharacterDataSequence` objects. Access
to sequences based on taxon labels as well as indexes is also provided.
Numerous methods are provided to manipulate and iterate over sequences.
Character partitions can be managed through `CharacterSubset` objects,
while management of detailed metadata on character types is available
through |CharacterType| objects.
Objects can be instantiated by reading data from external sources through
the usual ``get_from_stream()``, ``get_from_path()``, or
``get_from_string()`` functions. In addition, a single matrix object can be
instantiated from multiple matrices (``concatenate()``) or data sources
(``concatenate_from_paths``).
A range of methods also exist for importing data from another matrix object.
These vary depending on how "new" and "existing" are treated. A "new"
sequence is a sequence in the other matrix associated with a |Taxon|
object for which there is no sequence defined in the current matrix. An
"existing" sequence is a sequence in the other matrix associated with a
|Taxon| object for which there *is* a sequence defined in the
current matrix.
+---------------------------------+---------------------------------------------+--------------------------------------------+
| | New Sequences: IGNORED | New Sequences: ADDED |
+=================================+=============================================+============================================+
| Existing Sequences: IGNORED | [NO-OP] | :meth:`CharacterMatrix.add_sequences()` |
+---------------------------------+---------------------------------------------+--------------------------------------------+
| Existing Sequences: OVERWRITTEN | :meth:`CharacterMatrix.replace_sequences()` | :meth:`CharacterMatrix.update_sequences()` |
+---------------------------------+---------------------------------------------+--------------------------------------------+
| Existing Sequences: EXTENDED | :meth:`CharacterMatrix.extend_sequences()` | :meth:`CharacterMatrix.extend_matrix()` |
+---------------------------------+---------------------------------------------+--------------------------------------------+
If character subsets have been defined, these subsets can be exported to independent matrices.
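The following is a minimal illustrative sketch of this dictionary-like
access (the file name "pythonidae.nex" and the taxon label "Python
regius" are hypothetical)::
import dendropy
dna = dendropy.DnaCharacterMatrix.get(path="pythonidae.nex", schema="nexus")
taxon = dna.taxon_namespace.get_taxon("Python regius")
s1 = dna[taxon]             # access by |Taxon| object
s2 = dna["Python regius"]   # access by taxon label
s3 = dna[0]                 # access by index in the taxon namespace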
"""
###########################################################################
### Class Variables
data_type = None
character_sequence_type = CharacterDataSequence
###########################################################################
### Factory (Class) Methods
def _parse_and_create_from_stream(cls,
stream,
schema,
matrix_offset=0,
**kwargs):
taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
if taxon_namespace is None:
taxon_namespace = taxonmodel.TaxonNamespace()
def tns_factory(label):
if label is not None and taxon_namespace.label is None:
taxon_namespace.label = label
return taxon_namespace
label = kwargs.pop("label", None)
kwargs["data_type"] = cls.data_type
reader = dataio.get_reader(schema, **kwargs)
char_matrices = reader.read_char_matrices(
stream=stream,
taxon_namespace_factory=tns_factory,
char_matrix_factory=new_char_matrix,
state_alphabet_factory=charstatemodel.StateAlphabet,
global_annotations_target=None)
if len(char_matrices) == 0:
raise ValueError("No character data in data source")
char_matrix = char_matrices[matrix_offset]
if char_matrix.data_type != cls.data_type:
raise ValueError(
"Data source (at offset {}) is of type '{}', "
"but current CharacterMatrix is of type '{}'.".format(
matrix_offset,
char_matrix.data_type,
cls.data_type))
return char_matrix
_parse_and_create_from_stream = classmethod(_parse_and_create_from_stream)
@classmethod
def get(cls, **kwargs):
"""
Instantiate and return a *new* character matrix object from a data source.
**Mandatory Source-Specification Keyword Argument (Exactly One of the Following Required):**
- **file** (*file*) -- File or file-like object of data opened for reading.
- **path** (*str*) -- Path to file of data.
- **url** (*str*) -- URL of data.
- **data** (*str*) -- Data given directly.
**Mandatory Schema-Specification Keyword Argument:**
- **schema** (*str*) -- Identifier of format of data given by the
"``file``", "``path``", "``data``", or "``url``" argument
specified above: ":doc:`fasta </schemas/fasta>`", ":doc:`nexus
</schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`",
":doc:`phylip </schemas/phylip>`", etc.
See "|Schemas|" for more details.
**Optional General Keyword Arguments:**
- **label** (*str*) -- Name or identifier to be assigned to the new
object; if not given, will be assigned the one specified in the
data source, or |None| otherwise.
- **taxon_namespace** (|TaxonNamespace|) -- The |TaxonNamespace|
instance to use to :doc:`manage the taxon names </primer/taxa>`.
If not specified, a new one will be created.
- **matrix_offset** (*int*) -- 0-based index of character block or
matrix in source to be parsed. If not specified then the
first matrix (offset = 0) is assumed.
- **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
then unsupported or unrecognized keyword arguments will not
result in an error. Default is |False|: unsupported keyword
arguments will result in an error.
**Optional Schema-Specific Keyword Arguments:**
These provide control over how the data is interpreted and
processed, and supported argument names and values depend on
the schema as specified by the value passed as the "``schema``"
argument. See "|Schemas|" for more details.
**Examples:**
::
dna1 = dendropy.DnaCharacterMatrix.get(
file=open("pythonidae.fasta"),
schema="fasta")
dna2 = dendropy.DnaCharacterMatrix.get(
url="http://purl.org/phylo/treebase/phylows/matrix/TB2:M2610?format=nexus",
schema="nexus")
aa1 = dendropy.ProteinCharacterMatrix.get(
file=open("pythonidae.dat"),
schema="phylip")
std1 = dendropy.StandardCharacterMatrix.get(
path="python_morph.nex",
schema="nexus")
std2 = dendropy.StandardCharacterMatrix.get(
data=">t1\\n01011\\n\\n>t2\\n11100",
schema="fasta")
"""
return cls._get_from(**kwargs)
def concatenate(cls, char_matrices):
"""
Creates and returns a single character matrix from multiple
CharacterMatrix objects specified as a list, 'char_matrices'.
All the CharacterMatrix objects in the list must be of the
same type, and share the same TaxonNamespace reference. All taxa
must be present in all alignments, and all alignments must
be of the same length. Component parts will be recorded as
character subsets.
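Example (an illustrative sketch; the file names are hypothetical, and the
matrices are assumed to share one taxon namespace)::
import dendropy
tns = dendropy.TaxonNamespace()
m1 = dendropy.DnaCharacterMatrix.get(path="locus1.nex", schema="nexus", taxon_namespace=tns)
m2 = dendropy.DnaCharacterMatrix.get(path="locus2.nex", schema="nexus", taxon_namespace=tns)
combined = dendropy.DnaCharacterMatrix.concatenate([m1, m2])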
"""
taxon_namespace = char_matrices[0].taxon_namespace
nseqs = len(char_matrices[0])
concatenated_chars = cls(taxon_namespace=taxon_namespace)
pos_start = 0
for cidx, cm in enumerate(char_matrices):
if cm.taxon_namespace is not taxon_namespace:
raise ValueError("Different ``taxon_namespace`` references in matrices to be merged")
if len(cm) != len(taxon_namespace):
raise ValueError("Number of sequences not equal to the number of taxa")
if len(cm) != nseqs:
raise ValueError("Different number of sequences across alignments: %d (expecting %d based on first matrix)" % (len(cm), nseqs))
v1 = len(cm[0])
for t, s in cm.items():
if len(s) != v1:
raise ValueError("Unequal length sequences in character matrix {}".format(cidx+1))
concatenated_chars.extend_matrix(cm)
if cm.label is None:
new_label = "locus%03d" % cidx
else:
new_label = cm.label
cs_label = new_label
i = 2
while cs_label in concatenated_chars.character_subsets:
cs_label = "%s_%03d" % (new_label, i)
i += 1
character_indices = range(pos_start, pos_start + cm.vector_size)
pos_start += cm.vector_size
concatenated_chars.new_character_subset(character_indices=character_indices,
label=cs_label)
return concatenated_chars
concatenate = classmethod(concatenate)
def concatenate_from_streams(cls, streams, schema, **kwargs):
"""
Read a character matrix from each file object given in ``streams``,
assuming data format/schema ``schema``, and passing any keyword arguments
down to the underlying specialized reader. Merge the character matrices
and return the combined character matrix. Component parts will be
recorded as character subsets.
"""
taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
if taxon_namespace is None:
taxon_namespace = taxonmodel.TaxonNamespace()
kwargs["taxon_namespace"] = taxon_namespace
char_matrices = []
for stream in streams:
char_matrices.append(cls.get_from_stream(stream,
schema=schema, **kwargs))
return cls.concatenate(char_matrices)
concatenate_from_streams = classmethod(concatenate_from_streams)
def concatenate_from_paths(cls, paths, schema, **kwargs):
"""
Read a character matrix from each file path given in ``paths``, assuming
data format/schema ``schema``, and passing any keyword arguments down to
the underlying specialized reader. Merge the character matrices and return the combined
character matrix. Component parts will be recorded as character
subsets.
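Example (a sketch; the paths are hypothetical)::
combined = dendropy.DnaCharacterMatrix.concatenate_from_paths(paths=["locus1.nex", "locus2.nex"], schema="nexus")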
"""
streams = [open(path, "r") for path in paths]
return cls.concatenate_from_streams(streams, schema, **kwargs)
concatenate_from_paths = classmethod(concatenate_from_paths)
def from_dict(cls,
source_dict,
char_matrix=None,
case_sensitive_taxon_labels=False,
**kwargs):
"""
Populates character matrix from dictionary (or similar mapping type),
creating |Taxon| objects and sequences as needed.
Keys must be strings representing labels of |Taxon| objects, or
|Taxon| objects directly. If key is specified as string, then it
will be dereferenced to the first existing |Taxon| object in the
current taxon namespace with the same label. If no such |Taxon|
object can be found, then a new |Taxon| object is created and
added to the current namespace. If a key is specified as a
|Taxon| object, then this is used directly. If it is not in the
current taxon namespace, it will be added.
Values are the sequences (more generally, iterable of values). If
values are of type `CharacterDataSequence`, then they are added
as-is. Otherwise `CharacterDataSequence` instances are
created for them. Values may be coerced into types compatible with
particular matrices. The classmethod ``coerce_values()`` will be
called for this.
Examples
--------
The following creates a |DnaCharacterMatrix| instance with three
sequences::
d = {
"s1" : "TCCAA",
"s2" : "TGCAA",
"s3" : "TG-AA",
}
dna = DnaCharacterMatrix.from_dict(d)
Three |Taxon| objects will be created, corresponding to the
labels 's1', 's2', 's3'. Each associated string sequence will be
converted to a `CharacterDataSequence`, with each symbol ("A", "C",
etc.) being replaced by the DNA state represented by the symbol.
Parameters
----------
source_dict : dict or other mapping type
Keys must be strings representing labels of |Taxon| objects, or
|Taxon| objects directly. Values are sequences. See above
for details.
char_matrix : |CharacterMatrix|
Instance of |CharacterMatrix| to populate with data. If not
specified, a new one will be created using keyword arguments
specified by ``kwargs``.
case_sensitive_taxon_labels : boolean
If |True|, string labels specified as keys in ``source_dict`` will
be matched to |Taxon| objects in the current taxon namespace
with case being respected. If |False|, then case will be ignored.
\*\*kwargs : keyword arguments, optional
Keyword arguments to be passed to constructor of
|CharacterMatrix| when creating new instance to populate, if
no target instance is provided via ``char_matrix``.
Returns
-------
char_matrix : |CharacterMatrix|
|CharacterMatrix| populated by data from ``source_dict``.
"""
if char_matrix is None:
char_matrix = cls(**kwargs)
for key in source_dict:
if textprocessing.is_str_type(key):
taxon = char_matrix.taxon_namespace.require_taxon(key,
is_case_sensitive=case_sensitive_taxon_labels)
else:
taxon = key
if taxon not in char_matrix.taxon_namespace:
char_matrix.taxon_namespace.add_taxon(taxon)
s = char_matrix.coerce_values(source_dict[key])
char_matrix[taxon] = s
return char_matrix
from_dict = classmethod(from_dict)
###########################################################################
### Lifecycle and Identity
def __init__(self, *args, **kwargs):
if len(args) > 1:
# only allow 1 positional argument
raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)
elif len(args) == 1 and isinstance(args[0], CharacterMatrix):
self._clone_from(args[0], kwargs)
else:
basemodel.DataObject.__init__(self, label=kwargs.pop("label", None))
taxonmodel.TaxonNamespaceAssociated.__init__(self,
taxon_namespace=taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None))
self._taxon_sequence_map = {}
self.character_types = []
self.comments = []
self.character_subsets = container.OrderedCaselessDict()
if len(args) == 1:
# takes care of all possible initializations, including, e.g.,
# tuples and so on
d = collections.OrderedDict(args[0])
self.__class__.from_dict(d, char_matrix=self)
if kwargs:
raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def _clone_from(self, src, kwargs_dict):
# super(Tree, self).__init__()
memo = {}
# memo[id(tree)] = self
taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs_dict, src.taxon_namespace)
memo[id(src.taxon_namespace)] = taxon_namespace
if taxon_namespace is not src.taxon_namespace:
for t1 in src.taxon_namespace:
t2 = taxon_namespace.require_taxon(label=t1.label)
memo[id(t1)] = t2
else:
for t1 in src.taxon_namespace:
memo[id(t1)] = t1
t = copy.deepcopy(src, memo)
self.__dict__ = t.__dict__
self.label = kwargs_dict.pop("label", src.label)
return self
def __copy__(self):
other = self.__class__(label=self.label,
taxon_namespace=self.taxon_namespace)
for taxon in self._taxon_sequence_map:
# other._taxon_sequence_map[taxon] = self.__class__.character_sequence_type(self._taxon_sequence_map[taxon])
other._taxon_sequence_map[taxon] = self._taxon_sequence_map[taxon]
memo = {}
memo[id(self)] = other
other.deep_copy_annotations_from(self, memo)
return other
def taxon_namespace_scoped_copy(self, memo=None):
if memo is None:
memo = {}
# this populates ``memo`` with references to the
# the TaxonNamespace and Taxon objects
self.taxon_namespace.populate_memo_for_taxon_namespace_scoped_copy(memo)
return self.__deepcopy__(memo=memo)
def __deepcopy__(self, memo=None):
return basemodel.Annotable.__deepcopy__(self, memo=memo)
###########################################################################
### Data I/O
# def _parse_and_add_from_stream(self, stream, schema, **kwargs):
# """
# Populates objects of this type from ``schema``-formatted
# data in the file-like object source ``stream``, *replacing*
# all current data. If multiple character matrices are in the data
# source, a 0-based index of the character matrix to use can
# be specified using the ``matrix_offset`` keyword (defaults to 0, i.e., first
# character matrix).
# """
# warnings.warn("Repopulating a CharacterMatrix is now deprecated. Instantiate a new instance from the source instead.",
# DeprecationWarning)
# m = self.__class__._parse_and_create_from_stream(stream=stream,
# schema=schema,
# **kwargs)
# return self.clone_from(m)
def _format_and_write_to_stream(self, stream, schema, **kwargs):
"""
Writes out ``self`` in ``schema`` format to a destination given by
file-like object ``stream``.
Parameters
----------
stream : file or file-like object
Destination for data.
schema : string
Must be a recognized character file schema, such as "nexus",
"phylip", etc, for which a specialized writer is available. If this
is not implemented for the schema specified, then a
UnsupportedSchemaError is raised.
\*\*kwargs : keyword arguments, optional
Keyword arguments will be passed directly to the writer for the
specified schema. See documentation for details on keyword
arguments supported by writers of various schemas.
"""
writer = dataio.get_writer(schema, **kwargs)
writer.write_char_matrices([self],
stream)
###########################################################################
### Taxon Management
def reconstruct_taxon_namespace(self,
unify_taxa_by_label=True,
taxon_mapping_memo=None):
"""
See `TaxonNamespaceAssociated.reconstruct_taxon_namespace`.
"""
if taxon_mapping_memo is None:
taxon_mapping_memo = {}
original_taxa = list(self._taxon_sequence_map.keys())
for original_taxon in original_taxa:
if unify_taxa_by_label or original_taxon not in self.taxon_namespace:
t = taxon_mapping_memo.get(original_taxon, None)
if t is None:
# taxon to use not given and
# we have not yet created a counterpart
if unify_taxa_by_label:
# this will force usage of any taxon with
# a label that matches the current taxon
t = self.taxon_namespace.require_taxon(label=original_taxon.label)
else:
# this will unconditionally create a new taxon
t = self.taxon_namespace.new_taxon(label=original_taxon.label)
taxon_mapping_memo[original_taxon] = t
else:
# taxon to use is given by mapping
self.taxon_namespace.add_taxon(t)
if t in self._taxon_sequence_map:
raise error.TaxonNamespaceReconstructionError("Multiple sequences for taxon with label '{}'".format(t.label))
self._taxon_sequence_map[t] = self._taxon_sequence_map[original_taxon]
del self._taxon_sequence_map[original_taxon]
def poll_taxa(self, taxa=None):
"""
Returns a set populated with all |Taxon| instances associated
with ``self``.
Parameters
----------
taxa : set()
Set to populate. If not specified, a new one will be created.
Returns
-------
taxa : set[|Taxon|]
Set of taxa associated with ``self``.
"""
if taxa is None:
taxa = set()
for taxon in self._taxon_sequence_map:
taxa.add(taxon)
return taxa
def update_taxon_namespace(self):
"""
All |Taxon| objects in ``self`` that are not in
``self.taxon_namespace`` will be added.
"""
assert self.taxon_namespace is not None
for taxon in self._taxon_sequence_map:
if taxon not in self.taxon_namespace:
self.taxon_namespace.add_taxon(taxon)
def reindex_subcomponent_taxa(self):
"""
Synchronizes |Taxon| objects of map to ``taxon_namespace`` of self.
"""
raise NotImplementedError("'reindex_subcomponent_taxa()' is no longer supported; use '{}.reconstruct_taxon_namespace()' instead".format(self.__class__.__name__))
###########################################################################
### Sequence CRUD
def _resolve_key(self, key):
"""
Resolves map access key into |Taxon| instance.
If ``key`` is an integer, it is assumed to be a taxon index.
If ``key`` is a string, it is assumed to be a taxon label.
Otherwise, assumed to be |Taxon| instance directly.
"""
if isinstance(key, int):
if abs(key) < len(self.taxon_namespace):
taxon = self.taxon_namespace[key]
else:
raise IndexError(key)
elif textprocessing.is_str_type(key):
taxon = self.taxon_namespace.get_taxon(label=key)
if taxon is None:
raise KeyError(key)
else:
taxon = key
return taxon
def new_sequence(self, taxon, values=None):
"""
Creates a new `CharacterDataSequence` associated with |Taxon|
``taxon``, and populates it with values in ``values``.
Parameters
----------
taxon : |Taxon|
|Taxon| instance with which this sequence is associated.
values : iterable or |None|
An initial set of values with which to populate the new character
sequence.
Returns
-------
s : `CharacterDataSequence`
A new `CharacterDataSequence` associated with |Taxon|
``taxon``.
"""
if taxon in self._taxon_sequence_map:
raise ValueError("Character values vector for taxon {} already exists".format(repr(taxon)))
if taxon not in self.taxon_namespace:
raise ValueError("Taxon {} is not in object taxon namespace".format(repr(taxon)))
cv = self.__class__.character_sequence_type(values)
self._taxon_sequence_map[taxon] = cv
return cv
def __getitem__(self, key):
"""
Retrieves sequence for ``key``, which can be an index or a label of a
|Taxon| instance in the current taxon namespace, or a
|Taxon| instance directly.
If no sequence is currently associated with specified |Taxon|, a
new one will be created. Note that the |Taxon| object must have
already been defined in the current taxon namespace.
Parameters
----------
key : integer, string, or |Taxon|
If an integer, assumed to be an index of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
If a string, assumed to be a label of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
Otherwise, assumed to be |Taxon| instance directly. In all
cases, the |Taxon| object must be (already) defined in the
current taxon namespace.
Returns
-------
s : `CharacterDataSequence`
A sequence associated with the |Taxon| instance referenced
by ``key``.
"""
taxon = self._resolve_key(key)
try:
return self._taxon_sequence_map[taxon]
except KeyError:
return self.new_sequence(taxon)
def __setitem__(self, key, values):
"""
Assigns sequence ``values`` to taxon specified by ``key``, which can be an
index or a label of a |Taxon| instance in the current taxon
namespace, or a |Taxon| instance directly.
If no sequence is currently associated with specified |Taxon|, a
new one will be created. Note that the |Taxon| object must have
already been defined in the current taxon namespace.
Parameters
----------
key : integer, string, or |Taxon|
If an integer, assumed to be an index of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
If a string, assumed to be a label of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
Otherwise, assumed to be |Taxon| instance directly. In all
cases, the |Taxon| object must be (already) defined in the
current taxon namespace.
"""
taxon = self._resolve_key(key)
if taxon not in self.taxon_namespace:
raise ValueError(repr(key))
if not isinstance(values, self.__class__.character_sequence_type):
values = self.__class__.character_sequence_type(values)
self._taxon_sequence_map[taxon] = values
def __contains__(self, key):
"""
Returns |True| if a sequence associated with ``key`` is in ``self``, or
|False| otherwise.
Parameters
----------
key : integer, string, or |Taxon|
If an integer, assumed to be an index of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
If a string, assumed to be a label of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
Otherwise, assumed to be |Taxon| instance directly. In all
cases, the |Taxon| object must be (already) defined in the
current taxon namespace.
Returns
-------
b : boolean
|True| if ``key`` is in ``self``; |False| otherwise.
"""
return self._taxon_sequence_map.__contains__(key)
def __delitem__(self, key):
"""
Removes sequence for ``key``, which can be an index or a label of a
|Taxon| instance in the current taxon namespace, or a
|Taxon| instance directly.
Parameters
----------
key : integer, string, or |Taxon|
If an integer, assumed to be an index of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
If a string, assumed to be a label of a |Taxon| object in
the current |TaxonNamespace| object of ``self.taxon_namespace``.
Otherwise, assumed to be |Taxon| instance directly. In all
cases, the |Taxon| object must be (already) defined in the
current taxon namespace.
"""
return self._taxon_sequence_map.__delitem__(key)
def clear(self):
"""
Removes all sequences from matrix.
"""
self._taxon_sequence_map.clear()
def sequences(self):
"""
List of all sequences in self.
Returns
-------
s : list of `CharacterDataSequence` objects in self
"""
s = [self[taxon] for taxon in self]
return s
def vectors(self):
deprecate.dendropy_deprecation_warning(
message="Deprecated since DendroPy 4: 'vectors()' will no longer be supported in future releases; use 'sequences()' instead")
return self.sequences()
###########################################################################
### Symbol/alphabet management
def coerce_values(self, values):
"""
Converts elements of ``values`` to type of matrix.
This method is called by :meth:`CharacterMatrix.from_dict` to create
sequences from iterables of values. This method should be overridden
by derived classes to ensure that ``values`` consists of types compatible
with the particular type of matrix. For example, a CharacterMatrix type
with a fixed state alphabet (such as |DnaCharacterMatrix|) would
dereference the string elements of ``values`` to return a list of
|StateIdentity| objects corresponding to the symbols represented
by the strings. If there is no value-type conversion done, then
``values`` should be returned as-is. If no value-type conversion is
possible (e.g., when the type of a value is dependent on positional
information), then a TypeError should be raised.
Parameters
----------
values : iterable
Iterable of values to be converted.
Returns
-------
v : list of values.
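Example (an illustrative sketch of the intended contract for a
fixed-alphabet subclass such as |DnaCharacterMatrix|, where string
symbols are mapped to |StateIdentity| objects)::
dna = DnaCharacterMatrix()
states = dna.coerce_values("ACGT")  # list of four DNA |StateIdentity| objects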
"""
return values
###########################################################################
### Sequence Access Iteration
def __iter__(self):
"Returns an iterator over character map's ordered keys."
for t in self.taxon_namespace:
if t in self._taxon_sequence_map:
yield t
def values(self):
"""
Iterates values (i.e. sequences) in this matrix.
"""
for t in self:
yield self[t]
# def iterkeys(self):
# "Dictionary interface implementation for direct access to character map."
# for t in self.taxon_namespace:
# if t in self._taxon_sequence_map:
# yield t
# def itervalues(self):
# "Dictionary interface implementation for direct access to character map."
# for t in self.taxon_namespace:
# if t in self._taxon_sequence_map:
# yield self._taxon_sequence_map[t]
def items(self):
"Returns character map key, value pairs in key-order."
for t in self.taxon_namespace:
if t in self._taxon_sequence_map:
yield t, self._taxon_sequence_map[t]
# def values(self):
# "Returns list of values."
# return [self._taxon_sequence_map[t] for t in self.taxon_namespace if t in self._taxon_seq_map]
# def pop(self, key, alt_val=None):
# "a.pop(k[, x]): a[k] if k in a, else x (and remove k)"
# return self._taxon_sequence_map.pop(key, alt_val)
# def popitem(self):
# "a.popitem() remove and last (key, value) pair"
# return self._taxon_sequence_map.popitem()
# def keys(self):
# "Returns a copy of the ordered list of character map keys."
# return list(self._taxon_sequence_map.keys())
###########################################################################
### Metrics
def __len__(self):
"""
Number of sequences in matrix.
Returns
-------
n : integer
Number of sequences in matrix.
"""
return len(self._taxon_sequence_map)
def _get_sequence_size(self):
"""
Number of characters in *first* sequence in matrix.
Returns
-------
n : integer
Number of characters in the first sequence in the matrix.
"""
if len(self):
# yuck, but len(self.values())
# means we have to create and populate a list ...
return len(self[next(iter(self._taxon_sequence_map))])
else:
return 0
sequence_size = property(_get_sequence_size, None, None)
vector_size = property(_get_sequence_size, None, None) # legacy
def _get_max_sequence_size(self):
"""
Maximum number of characters across all sequences in matrix.
Returns
-------
n : integer
Maximum number of characters across all sequences in matrix.
"""
max_len = 0
for k in self:
if len(self[k]) > max_len:
max_len = len(self._taxon_sequence_map[k])
return max_len
max_sequence_size = property(_get_max_sequence_size, None, None)
###########################################################################
### Mass/Bulk Operations
def fill(self, value, size=None, append=True):
"""
Pads out all sequences in ``self`` by adding ``value`` to each sequence
until its length is ``size`` long or equal to the length of the longest
sequence if ``size`` is not specified.
Parameters
----------
value : object
A valid value (e.g., a numeric value for continuous characters, or
a |StateIdentity| for discrete character).
size : integer or None
The size (length) up to which the sequences will be padded. If |None|, then
the maximum (longest) sequence size will be used.
append : boolean
If |True| (default), then new values will be added to the end of
each sequence. If |False|, then new values will be inserted to the
front of each sequence.
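Example (a sketch; assumes ``dna`` is a |DnaCharacterMatrix| and that
"-" is the gap symbol in its default state alphabet)::
gap = dna.default_state_alphabet["-"]
dna.fill(gap)   # pad every sequence to the length of the longest one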
"""
if size is None:
size = self.max_sequence_size
for k in self:
v = self[k]
while len(v) < size:
if append:
v.append(value)
else:
v.insert(0, value)
return size
def fill_taxa(self):
"""
Adds a new (empty) sequence for each |Taxon| instance in
current taxon namespace that does not have a sequence.
"""
for taxon in self.taxon_namespace:
if taxon not in self:
self[taxon] = CharacterDataSequence()
def pack(self, value=None, size=None, append=True):
"""
Adds missing sequences for all |Taxon| instances in current
namespace, and then pads out all sequences in ``self`` by adding ``value``
to each sequence until its length is ``size`` long or equal to the length
of the longest sequence if ``size`` is not specified. A combination of
:meth:`CharacterMatrix.fill_taxa()` and
:meth:`CharacterMatrix.fill()`.
Parameters
----------
value : object
A valid value (e.g., a numeric value for continuous characters, or
a |StateIdentity| for discrete character).
size : integer or None
The size (length) up to which the sequences will be padded. If |None|, then
the maximum (longest) sequence size will be used.
append : boolean
If |True| (default), then new values will be added to the end of
each sequence. If |False|, then new values will be inserted to the
front of each sequence.
"""
self.fill_taxa()
self.fill(value=value, size=size, append=append)
def add_sequences(self, other_matrix):
"""
Adds sequences for |Taxon| objects that are in ``other_matrix`` but not in
``self``.
Parameters
----------
other_matrix : |CharacterMatrix|
Matrix from which to add sequences.
Notes
-----
1. ``other_matrix`` must be of same type as ``self``.
2. ``other_matrix`` must have the same |TaxonNamespace| as ``self``.
3. Each sequence associated with a |Taxon| reference in ``other_matrix``
but not in ``self`` will be added to ``self`` as a shallow-copy.
4. All other sequences will be ignored.
"""
if other_matrix.taxon_namespace is not self.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, other_matrix)
for taxon in other_matrix._taxon_sequence_map:
if taxon not in self._taxon_sequence_map:
self._taxon_sequence_map[taxon] = self.__class__.character_sequence_type(other_matrix._taxon_sequence_map[taxon])
def replace_sequences(self, other_matrix):
"""
Replaces sequences for |Taxon| objects shared between ``self`` and
``other_matrix``.
Parameters
----------
other_matrix : |CharacterMatrix|
Matrix from which to replace sequences.
Notes
-----
1. ``other_matrix`` must be of same type as ``self``.
2. ``other_matrix`` must have the same |TaxonNamespace| as ``self``.
3. Each sequence in ``self`` associated with a |Taxon| that is
also represented in ``other_matrix`` will be replaced with a
shallow-copy of the corresponding sequence from ``other_matrix``.
4. All other sequences will be ignored.
"""
if other_matrix.taxon_namespace is not self.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, other_matrix)
for taxon in other_matrix._taxon_sequence_map:
if taxon in self._taxon_sequence_map:
self._taxon_sequence_map[taxon] = self.__class__.character_sequence_type(other_matrix._taxon_sequence_map[taxon])
def update_sequences(self, other_matrix):
"""
Replaces sequences for |Taxon| objects shared between ``self`` and
``other_matrix`` and adds sequences for |Taxon| objects that are
in ``other_matrix`` but not in ``self``.
Parameters
----------
other_matrix : |CharacterMatrix|
Matrix from which to update sequences.
Notes
-----
1. ``other_matrix`` must be of same type as ``self``.
2. ``other_matrix`` must have the same |TaxonNamespace| as ``self``.
3. Each sequence associated with a |Taxon| reference in ``other_matrix``
but not in ``self`` will be added to ``self``.
4. Each sequence in ``self`` associated with a |Taxon| that is
also represented in ``other_matrix`` will be replaced with a
shallow-copy of the corresponding sequence from ``other_matrix``.
"""
if other_matrix.taxon_namespace is not self.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, other_matrix)
for taxon in other_matrix._taxon_sequence_map:
self._taxon_sequence_map[taxon] = self.__class__.character_sequence_type(other_matrix._taxon_sequence_map[taxon])
def extend_sequences(self, other_matrix):
"""
Extends sequences in ``self`` with characters associated with
corresponding |Taxon| objects in ``other_matrix``.
Parameters
----------
other_matrix : |CharacterMatrix|
Matrix from which to extend sequences.
Notes
-----
1. ``other_matrix`` must be of same type as ``self``.
2. ``other_matrix`` must have the same |TaxonNamespace| as ``self``.
3. Each sequence associated with a |Taxon| reference in
``other_matrix`` that is also in ``self`` will be appended to the
sequence currently associated with that |Taxon| reference
in ``self``.
4. All other sequences will be ignored.
"""
if other_matrix.taxon_namespace is not self.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, other_matrix)
for taxon in other_matrix._taxon_sequence_map:
if taxon in self._taxon_sequence_map:
self._taxon_sequence_map[taxon].extend(other_matrix._taxon_sequence_map[taxon])
def extend_matrix(self, other_matrix):
"""
Extends sequences in ``self`` with characters associated with
corresponding |Taxon| objects in ``other_matrix`` and adds
sequences for |Taxon| objects that are in ``other_matrix`` but not
in ``self``.
Parameters
----------
other_matrix : |CharacterMatrix|
Matrix from which to extend.
Notes
-----
1. ``other_matrix`` must be of same type as ``self``.
2. ``other_matrix`` must have the same |TaxonNamespace| as ``self``.
3. Each sequence associated with a |Taxon| reference in ``other_matrix``
that is also in ``self`` will be appended
to the sequence currently associated with that |Taxon|
reference in ``self``.
4. Each sequence associated with a |Taxon| reference in
``other_matrix`` that is not in ``self`` will be added to ``self`` as a
shallow-copy.
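Example (a sketch; ``m1`` and ``m2`` are assumed to be matrices of the
same type sharing one |TaxonNamespace|)::
m1.extend_matrix(m2)   # columns of m2 appended, taxon by taxon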
"""
if other_matrix.taxon_namespace is not self.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, other_matrix)
for taxon in other_matrix._taxon_sequence_map:
if taxon in self._taxon_sequence_map:
self._taxon_sequence_map[taxon].extend(other_matrix._taxon_sequence_map[taxon])
else:
self._taxon_sequence_map[taxon]= self.__class__.character_sequence_type(other_matrix._taxon_sequence_map[taxon])
def remove_sequences(self, taxa):
"""
Removes sequences associated with |Taxon| instances specified in
``taxa``. A KeyError is raised if a |Taxon| instance is
specified for which there is no associated sequences.
Parameters
----------
taxa : iterable[|Taxon|]
List or some other iterable of |Taxon| instances.
"""
for taxon in taxa:
del self._taxon_sequence_map[taxon]
def discard_sequences(self, taxa):
"""
Removes sequences associated with |Taxon| instances specified in
``taxa`` if they exist.
Parameters
----------
taxa : iterable[|Taxon|]
List or some other iterable of |Taxon| instances.
"""
for taxon in taxa:
try:
del self._taxon_sequence_map[taxon]
except KeyError:
pass
def keep_sequences(self, taxa):
"""
Discards all sequences *not* associated with any of the |Taxon| instances specified in ``taxa``.
Parameters
----------
taxa : iterable[|Taxon|]
List or some other iterable of |Taxon| instances.
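Example (a sketch; ``taxon1`` and ``taxon2`` are |Taxon| objects assumed
to already have sequences in the matrix ``m``)::
m.keep_sequences([taxon1, taxon2])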
"""
to_keep = set(taxa)
# iterate over a snapshot of the keys, since entries may be deleted
for taxon in list(self._taxon_sequence_map):
if taxon not in to_keep:
del self._taxon_sequence_map[taxon]
# def extend_characters(self, other_matrix):
# """
# DEPRECATED
# Extends this matrix by adding characters from sequences of taxa
# in given matrix to sequences of taxa with correspond labels in
# this one. Taxa in the second matrix that do not exist in the
# current one are ignored.
# """
# self._taxon_sequence_map.extend_characters(other_matrix.taxon_seq_map)
# def extend_map(self,
# other_map,
# overwrite_existing=False,
# extend_existing=False):
# """
# DEPRECATED
# Extends this matrix by adding taxa and characters from the given
# map to this one. If ``overwrite_existing`` is True and a taxon
# in the other map is already present in the current one, then
# the sequence associated with the taxon in the second map
# replaces the sequence in the current one. If ``extend_existing``
# is True and a taxon in the other matrix is already present in
# the current one, then the squence map with the taxon in
# the second map will be added to the sequence in the current
# one. If both are True, then an exception is raised. If neither
# are True, and a taxon in the other map is already present in
# the current one, then the sequence is ignored.
# """
# self._taxon_sequence_map.extend(other_map,
# overwrite_existing=overwrite_existing,
# extend_existing=extend_existing)
# self.update_taxon_namespace()
# def extend(self,
# other_matrix,
# overwrite_existing=False,
# extend_existing=False):
# """
# Extends this matrix by adding taxa and characters from the given
# matrix to this one. If ``overwrite_existing`` is True and a taxon
# in the other matrix is already present in the current one, then
# the sequence associated with the taxon in the second matrix
# replaces the sequence in the current one. If ``extend_existing``
# is True and a taxon in the other matrix is already present in
# the current one, then the sequence associated with the taxon in
# the second matrix will be added to the sequence in the current
# one. If both are True, then an exception is raised. If neither
# are True, and a taxon in the other matrix is already present in
# the current one, then the sequence is ignored.
# """
# self._taxon_sequence_map.extend(other_matrix.taxon_seq_map,
# overwrite_existing=overwrite_existing,
# extend_existing=extend_existing)
# self.update_taxon_namespace()
###########################################################################
### Character Subset Management
def add_character_subset(self, char_subset):
"""
Adds a CharacterSubset object. Raises an error if one already exists
with the same label.
"""
label = char_subset.label
if label in self.character_subsets:
raise ValueError("Character subset '%s' already defined" % label)
self.character_subsets[label] = char_subset
return self.character_subsets[label]
def new_character_subset(self, label, character_indices):
"""
Defines a set of characters (columns) that make up a character subset.
Raises an error if one already exists with the same label. Column
indices are 0-based.
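Example (a sketch; the label and indices are hypothetical)::
m.new_character_subset(label="first_ten", character_indices=range(10))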
"""
cs = CharacterSubset(character_indices=character_indices, label=label)
return self.add_character_subset(cs)
###########################################################################
### CharacterType Management
def new_character_type(self, *args, **kwargs):
return CharacterType(*args, **kwargs)
###########################################################################
### Export
def export_character_subset(self, character_subset):
"""
Returns a new CharacterMatrix (of the same type) consisting only
of columns given by the CharacterSubset, ``character_subset``.
Note that this new matrix will still reference the same taxon set.
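Example (a sketch; assumes a subset labeled "first_ten" has previously
been defined on ``m``)::
sub_matrix = m.export_character_subset("first_ten")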
"""
if textprocessing.is_str_type(character_subset):
if character_subset not in self.character_subsets:
raise KeyError(character_subset)
else:
character_subset = self.character_subsets[character_subset]
return self.export_character_indices(character_subset.character_indices)
def export_character_indices(self, indices):
"""
Returns a new CharacterMatrix (of the same type) consisting only
of columns given by the 0-based indices in ``indices``.
Note that this new matrix will still reference the same taxon set.
"""
clone = self.__class__(self)
# clear out character subsets; otherwise all indices will have to be
# recalculated, which will require some careful and perhaps arbitrary
# handling of corner cases
clone.character_subsets = container.OrderedCaselessDict()
indices = set(indices)
for vec in clone.values():
for cell_idx in range(len(vec)-1, -1, -1):
if cell_idx not in indices:
del(vec[cell_idx])
return clone
###########################################################################
### Representation
def description(self, depth=1, indent=0, itemize="", output=None):
"""
Returns description of object, up to level ``depth``.
"""
if depth is None or depth < 0:
return
output_strio = StringIO()
label = " (%s: '%s')" % (id(self), self.label)
output_strio.write('%s%s%s object at %s%s'
% (indent*' ',
itemize,
self.__class__.__name__,
hex(id(self)),
label))
if depth >= 1:
output_strio.write(': %d Sequences' % len(self))
if depth >= 2:
if self.taxon_namespace is not None:
tlead = "\n%s[Taxon Set]\n" % (" " * (indent+4))
output_strio.write(tlead)
self.taxon_namespace.description(depth=depth-1, indent=indent+8, itemize="", output=output_strio)
tlead = "\n%s[Characters]\n" % (" " * (indent+4))
output_strio.write(tlead)
indent += 8
maxlabel = max([len(str(t.label)) for t in self.taxon_namespace])
for i, t in enumerate(self.taxon_namespace):
output_strio.write('%s%s%s : %s characters\n' \
% (" " * indent,
"[%d] " % i,
str(t.label),
len(self._taxon_sequence_map[t])))
s = output_strio.getvalue()
if output is not None:
output.write(s)
return s
###########################################################################
### Legacy
def _get_taxon_seq_map(self):
warnings.warn("All methods and features of 'CharacterMatrix.taxon_seq_map' have been integrated directly into 'CharacterMatrix', or otherwise replaced entirely",
stacklevel=2)
return self
taxon_seq_map = property(_get_taxon_seq_map)
###############################################################################
## Specialized Matrices
### Continuous Characters ##################################################
class ContinuousCharacterDataSequence(CharacterDataSequence):
"""
A sequence of continuous character values for a particular taxon or entry
in a data matrix. Specializes `CharacterDataSequence` by assuming all
values are primitive numerics (i.e., either floats or integers) when
copying or representing self.
"""
def symbols_as_list(self):
"""
Returns list of string representation of values of this vector.
Returns
-------
v : list
List of string representation of values making up this vector.
"""
return [str(v) for v in self]
def symbols_as_string(self, sep=" "):
# different default
return CharacterDataSequence.symbols_as_string(self, sep=sep)
class ContinuousCharacterMatrix(CharacterMatrix):
"""
Specializes |CharacterMatrix| for continuous data.
Sequences stored using |ContinuousCharacterDataSequence|, with values of
elements assumed to be ``float``.
"""
character_sequence_type = ContinuousCharacterDataSequence
data_type = "continuous"
def __init__(self, *args, **kwargs):
CharacterMatrix.__init__(self, *args, **kwargs)
### Discrete Characters ##################################################
class DiscreteCharacterDataSequence(CharacterDataSequence):
pass
class DiscreteCharacterMatrix(CharacterMatrix):
character_sequence_type = DiscreteCharacterDataSequence
data_type = "discrete"
def __init__(self, *args, **kwargs):
CharacterMatrix.__init__(self, *args, **kwargs)
self.state_alphabets = []
self._default_state_alphabet = None
def _get_default_state_alphabet(self):
if self._default_state_alphabet is not None:
return self._default_state_alphabet
elif len(self.state_alphabets) == 1:
return self.state_alphabets[0]
elif len(self.state_alphabets) > 1:
raise TypeError("Multiple state alphabets defined for this matrix with no default specified")
elif len(self.state_alphabets) == 0:
raise TypeError("No state alphabets defined for this matrix")
return None
def _set_default_state_alphabet(self, s):
if s not in self.state_alphabets:
self.state_alphabets.append(s)
self._default_state_alphabet = s
default_state_alphabet = property(_get_default_state_alphabet, _set_default_state_alphabet)
def append_taxon_sequence(self, taxon, state_symbols):
if taxon not in self:
self[taxon] = CharacterDataSequence()
for value in state_symbols:
if textprocessing.is_str_type(value):
symbol = value
else:
symbol = str(value)
self[taxon].append(self.default_symbol_state_map[symbol])
def remap_to_state_alphabet_by_symbol(self,
state_alphabet,
purge_other_state_alphabets=True):
"""
All entities with any reference to a state alphabet will have the
reference reassigned to ``state_alphabet``, and all entities with
any reference to a state alphabet element will have the reference
reassigned to the state alphabet element in ``state_alphabet`` that has
the same symbol. Raises KeyError if no matching symbol can be found.
"""
for vi, vec in enumerate(self._taxon_sequence_map.values()):
for ci, cell in enumerate(vec):
vec[ci] = state_alphabet[cell.symbol]
for ct in self.character_types:
if ct is not None:
ct.state_alphabet = state_alphabet
if purge_other_state_alphabets:
self.default_state_alphabet = state_alphabet
def remap_to_default_state_alphabet_by_symbol(self,
purge_other_state_alphabets=True):
"""
All entities with any reference to a state alphabet will have the
reference reassigned to the default state alphabet, and all entities
with any reference to a state alphabet element will have the
reference reassigned to any state alphabet element in the default
state alphabet that has the same symbol. Raises ValueError if no
matching symbol can be found.
"""
self.remap_to_state_alphabet_by_symbol(
state_alphabet=self.default_state_alphabet,
purge_other_state_alphabets=purge_other_state_alphabets)
def taxon_state_sets_map(self,
char_indices=None,
gaps_as_missing=True,
gap_state=None,
no_data_state=None):
"""
Returns a dictionary that maps taxon objects to lists of sets of
fundamental state indices.
Parameters
----------
char_indices : iterable of ints
An iterable of indexes of characters to include (by column). If not
given or |None| [default], then all characters are included.
gaps_as_missing : boolean
If |True| [default] then gap characters will be treated as missing
data values. If |False|, then they will be treated as an additional
(fundamental) state.
Returns
-------
d : dict
A dictionary with |Taxon| objects as keys and a list of sets
of fundamental state indexes as values.
E.g., Given the following matrix of DNA characters:
T1 AGN
T2 C-T
T3 GC?
Return with ``gaps_as_missing==True`` ::
{
<T1> : [ set([0]), set([2]), set([0,1,2,3]) ],
<T2> : [ set([1]), set([0,1,2,3]), set([3]) ],
<T3> : [ set([2]), set([1]), set([0,1,2,3]) ],
}
Return with ``gaps_as_missing==False`` ::
{
<T1> : [ set([0]), set([2]), set([0,1,2,3]) ],
<T2> : [ set([1]), set([4]), set([3]) ],
<T3> : [ set([2]), set([1]), set([0,1,2,3,4]) ],
}
Note that when gaps are treated as a fundamental state, not only
does '-' map to a distinct and unique state (4), but '?' (missing
data) maps to set consisting of all bases *and* the gap
state, whereas 'N' maps to a set of all bases but not including the
gap state.
When gaps are treated as missing, on the other hand, then '?' and
'N' and '-' all map to the same set, i.e. of all the bases.
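Example (a sketch; ``dna`` is an assumed |DnaCharacterMatrix|)::
tss_map = dna.taxon_state_sets_map(gaps_as_missing=True)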
"""
taxon_to_state_indices = {}
for t in self:
cdv = self[t]
if char_indices is None:
ci = range(len(cdv))
else:
ci = char_indices
v = []
for char_index in ci:
state = cdv[char_index]
if gaps_as_missing:
v.append(set(state.fundamental_indexes_with_gaps_as_missing))
else:
v.append(set(state.fundamental_indexes))
taxon_to_state_indices[t] = v
return taxon_to_state_indices
### Fixed Alphabet Characters ##################################################
class FixedAlphabetCharacterDataSequence(CharacterDataSequence):
pass
class FixedAlphabetCharacterMatrix(DiscreteCharacterMatrix):
character_sequence_type = FixedAlphabetCharacterDataSequence
data_type = "fixed"
datatype_alphabet = None
def __init__(self, *args, **kwargs):
DiscreteCharacterMatrix.__init__(self, *args, **kwargs)
self.state_alphabets.append(self.__class__.datatype_alphabet)
self._default_state_alphabet = self.__class__.datatype_alphabet
def coerce_values(self, values):
if self.datatype_alphabet is None:
raise ValueError("'datatype_alphabet' not set")
return charstatemodel.coerce_to_state_identities(
state_alphabet=self.datatype_alphabet,
values=values)
### DNA Characters ##################################################
class DnaCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class DnaCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for DNA data.
"""
character_sequence_type = DnaCharacterDataSequence
data_type = "dna"
datatype_alphabet = DNA_STATE_ALPHABET
### RNA Characters ##################################################
class RnaCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class RnaCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for RNA data.
"""
character_sequence_type = RnaCharacterDataSequence
data_type = "rna"
datatype_alphabet = RNA_STATE_ALPHABET
### Nucleotide Characters ##################################################
class NucleotideCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class NucleotideCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for nucleotide (DNA or RNA) data.
"""
character_sequence_type = NucleotideCharacterDataSequence
data_type = "nucleotide"
datatype_alphabet = NUCLEOTIDE_STATE_ALPHABET
### Protein Characters ##################################################
class ProteinCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class ProteinCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for protein or amino acid data.
"""
character_sequence_type = ProteinCharacterDataSequence
data_type = "protein"
datatype_alphabet = PROTEIN_STATE_ALPHABET
### Restricted Site Characters ##################################################
class RestrictionSitesCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class RestrictionSitesCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for restriction site data.
"""
character_sequence_type = RestrictionSitesCharacterDataSequence
data_type = "restriction"
datatype_alphabet = RESTRICTION_SITES_STATE_ALPHABET
### Infinite Sites Characters ##################################################
class InfiniteSitesCharacterDataSequence(FixedAlphabetCharacterDataSequence):
pass
class InfiniteSitesCharacterMatrix(FixedAlphabetCharacterMatrix):
"""
Specializes |CharacterMatrix| for infinite sites data.
"""
character_sequence_type = InfiniteSitesCharacterDataSequence
data_type = "infinite"
datatype_alphabet = INFINITE_SITES_STATE_ALPHABET
### Standard Characters ##################################################
class StandardCharacterDataSequence(DiscreteCharacterDataSequence):
pass
class StandardCharacterMatrix(DiscreteCharacterMatrix):
"""
Specializes |CharacterMatrix| for "standard" data (i.e., generic discrete
character data).
"""
character_sequence_type = StandardCharacterDataSequence
data_type = "standard"
def __init__(self, *args, **kwargs):
"""
A default state alphabet consisting of state symbols of 0-9 will
automatically be created unless ``default_state_alphabet=None`` is
passed in. To specify a different default state alphabet::
default_state_alphabet=dendropy.new_standard_state_alphabet("abc")
default_state_alphabet=dendropy.new_standard_state_alphabet("ij")
"""
if "default_state_alphabet" in kwargs:
default_state_alphabet = kwargs.pop("default_state_alphabet")
else:
default_state_alphabet = charstatemodel.new_standard_state_alphabet()
DiscreteCharacterMatrix.__init__(self, *args, **kwargs)
if default_state_alphabet is not None:
self.default_state_alphabet = default_state_alphabet
def coerce_values(self, values):
if self.default_state_alphabet is None:
raise ValueError("'default_state_alphabet' not set")
return charstatemodel.coerce_to_state_identities(
state_alphabet=self.default_state_alphabet,
values=values)
###############################################################################
## Main Character Matrix Factory Function
data_type_matrix_map = {
'continuous' : ContinuousCharacterMatrix,
'dna' : DnaCharacterMatrix,
'rna' : RnaCharacterMatrix,
'nucleotide' : NucleotideCharacterMatrix,
'protein' : ProteinCharacterMatrix,
'standard' : StandardCharacterMatrix,
'restriction' : RestrictionSitesCharacterMatrix,
'infinite' : InfiniteSitesCharacterMatrix,
}
def get_char_matrix_type(data_type):
if data_type is None:
raise TypeError("'data_type' must be specified")
matrix_type = data_type_matrix_map.get(data_type, None)
if matrix_type is None:
raise KeyError("Unrecognized data type specification: '{}' (supported data types: {})".format(
data_type, sorted(data_type_matrix_map.keys())))
return matrix_type
def new_char_matrix(data_type, **kwargs):
matrix_type = get_char_matrix_type(data_type=data_type)
m = matrix_type(**kwargs)
return m
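# A minimal usage sketch for the factory function above (the label is
# illustrative only): the factory looks up the class in
# ``data_type_matrix_map``, so the following two calls are equivalent:
#
#   m = new_char_matrix("dna", label="demo")
#   m = DnaCharacterMatrix(label="demo")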
|
py | b40fe4f36224b241a62db48affb51c1a476ec336 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import copy
import numpy as np
import astropy.units as u
from ..utils.scripts import make_path
from ..utils.fitting import Fit
from .. import stats
from .utils import CountsPredictor
from . import SpectrumObservationList, SpectrumObservation
__all__ = ["SpectrumFit"]
log = logging.getLogger(__name__)
class SpectrumFit(Fit):
"""Orchestrate a 1D counts spectrum fit.
After running the :func:`~gammapy.spectrum.SpectrumFit.run` method, the fit
results are available in :func:`~gammapy.spectrum.SpectrumFit.result`. For usage
examples, see :ref:`spectral_fitting`.
Parameters
----------
obs_list : `~gammapy.spectrum.SpectrumObservationList`, `~gammapy.spectrum.SpectrumObservation`
Observation(s) to fit
model : `~gammapy.spectrum.models.SpectralModel`
Source model with initial parameter values. Should return counts if
``forward_folded`` is False and a flux otherwise
stat : {'wstat', 'cash', 'cstat'}
Fit statistic
forward_folded : bool, default: True
Fold ``model`` with the IRFs given in ``obs_list``
fit_range : tuple of `~astropy.units.Quantity`
The intersection between the fit range and the observation thresholds will be used.
If you want to control which bins are taken into account in the fit for each
observation, use :func:`~gammapy.spectrum.PHACountsSpectrum.quality`
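Example (an illustrative sketch; ``obs`` and ``model`` are assumed to be
an existing `~gammapy.spectrum.SpectrumObservation` and
`~gammapy.spectrum.models.SpectralModel`, and ``u`` is ``astropy.units``)::
fit = SpectrumFit(obs_list=obs, model=model, stat="wstat", fit_range=(0.1, 30) * u.TeV)
fit.run()
print(fit.result)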
"""
def __init__(
self, obs_list, model, stat="wstat", forward_folded=True, fit_range=None
):
self.obs_list = obs_list
self._model = model.copy()
self.stat = stat
self.forward_folded = forward_folded
self.fit_range = fit_range
self._predicted_counts = None
self._statval = None
self._result = None
self._check_valid_fit()
self._apply_fit_range()
def __str__(self):
ss = self.__class__.__name__
ss += "\nSource model {}".format(self._model.__class__.__name__)
ss += "\nStat {}".format(self.stat)
ss += "\nForward Folded {}".format(self.forward_folded)
ss += "\nFit range {}".format(self.fit_range)
return ss
@property
def obs_list(self):
"""Observations participating in the fit"""
return self._obs_list
@obs_list.setter
def obs_list(self, obs_list):
if isinstance(obs_list, SpectrumObservation):
obs_list = SpectrumObservationList([obs_list])
self._obs_list = SpectrumObservationList(obs_list)
@property
def bins_in_fit_range(self):
"""Bins participating in the fit for each observation."""
return self._bins_in_fit_range
@property
def predicted_counts(self):
"""Current value of predicted counts.
For each observation, the predicted counts for the on region are
returned.
"""
return self._predicted_counts
@property
def statval(self):
"""Current value of statval.
For each observation the statval per bin is returned.
"""
return self._statval
@property
def fit_range(self):
"""Fit range."""
return self._fit_range
@fit_range.setter
def fit_range(self, fit_range):
self._fit_range = fit_range
self._apply_fit_range()
@property
def true_fit_range(self):
"""True fit range for each observation.
True fit range is the fit range set in the
`~gammapy.spectrum.SpectrumFit` with observation threshold taken into
account.
"""
true_range = []
for binrange, obs in zip(self.bins_in_fit_range, self.obs_list):
idx = np.where(binrange)[0]
if len(idx) == 0:
true_range.append(None)
continue
e_min = obs.e_reco[idx[0]]
e_max = obs.e_reco[idx[-1] + 1]
fit_range = u.Quantity((e_min, e_max))
true_range.append(fit_range)
return true_range
def _apply_fit_range(self):
"""Mark bins within desired fit range for each observation."""
self._bins_in_fit_range = []
for obs in self.obs_list:
# Take into account fit range
energy = obs.e_reco
valid_range = np.zeros(energy.nbins)
if self.fit_range is not None:
precision = 1e-3 # to avoid floating point rounding issues
idx_lo = np.where(energy * (1 + precision) < self.fit_range[0])[0]
valid_range[idx_lo] = 1
idx_hi = np.where(energy[:-1] * (1 - precision) > self.fit_range[1])[0]
if len(idx_hi) != 0:
idx_hi = np.insert(idx_hi, 0, idx_hi[0] - 1)
valid_range[idx_hi] = 1
# Take into account thresholds
try:
quality = obs.on_vector.quality
except AttributeError:
quality = np.zeros(obs.e_reco.nbins)
intersection = np.logical_and(1 - quality, 1 - valid_range)
self._bins_in_fit_range.append(intersection)
def predict_counts(self):
"""Predict counts for all observations.
The result is stored as ``predicted_counts`` attribute.
"""
predicted_counts = []
for obs in self.obs_list:
mu_sig = self._predict_counts_helper(obs, self._model, self.forward_folded)
predicted_counts.append(mu_sig)
self._predicted_counts = predicted_counts
def _predict_counts_helper(self, obs, model, forward_folded=True):
"""Predict counts for one observation.
Parameters
----------
obs : `~gammapy.spectrum.SpectrumObservation`
Response functions
model : `~gammapy.spectrum.models.SpectralModel`
Source or background model
forward_folded : bool, default: True
Fold model with IRFs
Returns
-------
predicted_counts : `numpy.ndarray`
Predicted counts for one observation
"""
predictor = CountsPredictor(model=model)
if forward_folded:
predictor.aeff = obs.aeff
predictor.edisp = obs.edisp
else:
predictor.e_true = obs.e_reco
predictor.livetime = obs.livetime
predictor.run()
counts = predictor.npred.data.data
# Check count unit (~unit of model amplitude)
if counts.unit.is_equivalent(""):
counts = counts.value
else:
raise ValueError("Predicted counts {}".format(counts))
# Apply AREASCAL column
counts *= obs.on_vector.areascal
return counts
def calc_statval(self):
"""Calc statistic for all observations.
The result is stored as attribute ``statval``, bin outside the fit
range are set to 0.
"""
statval = []
for obs, npred in zip(self.obs_list, self.predicted_counts):
on_stat = self._calc_statval_helper(obs, npred)
statval.append(on_stat)
self._statval = statval
self._restrict_statval()
def _calc_statval_helper(self, obs, prediction):
"""Calculate ``statval`` for one observation.
Parameters
----------
obs : `~gammapy.spectrum.SpectrumObservation`
Measured counts
prediction : `~numpy.ndarray`
Predicted counts in the on region
Returns
-------
statval : `~numpy.ndarray`
Statistic value per bin
"""
if self.stat == "cash":
return stats.cash(n_on=obs.on_vector.data.data.value, mu_on=prediction)
elif self.stat == "cstat":
return stats.cstat(n_on=obs.on_vector.data.data.value, mu_on=prediction)
elif self.stat == "wstat":
on_stat_ = stats.wstat(
n_on=obs.on_vector.data.data.value,
n_off=obs.off_vector.data.data.value,
alpha=obs.alpha,
mu_sig=prediction,
)
return np.nan_to_num(on_stat_)
else:
raise NotImplementedError("{}".format(self.stat))
def total_stat(self, parameters):
"""Statistic summed over all bins and all observations.
This is the likelihood function that is passed to the optimizers.
Parameters
----------
parameters : `~gammapy.utils.fitting.Parameters`
Model parameters
"""
self._model.parameters = parameters
self.predict_counts()
self.calc_statval()
total_stat = np.sum([np.sum(v) for v in self.statval], dtype=np.float64)
return total_stat
def _restrict_statval(self):
"""Apply valid fit range to statval.
"""
for statval, valid_range in zip(self.statval, self.bins_in_fit_range):
# Find bins outside safe range
idx = np.where(np.invert(valid_range))[0]
statval[idx] = 0
def _check_valid_fit(self):
"""Helper function to give useful error messages."""
# Assume that settings are the same for all observations
test_obs = self.obs_list[0]
irfs_exist = test_obs.aeff is not None or test_obs.edisp is not None
if self.forward_folded and not irfs_exist:
raise ValueError("IRFs required for forward folded fit")
if self.stat == "wstat" and self.obs_list[0].off_vector is None:
raise ValueError("Off vector required for WStat fit")
try:
test_obs.livetime
except KeyError:
raise ValueError("No observation livetime given")
@property
def result(self):
"""Bundle fit results into `~gammapy.spectrum.SpectrumFitResult`.
Parameters
----------
parameters : `~gammapy.utils.modeling.Parameters`
Best fit parameters
"""
from . import SpectrumFitResult
# run again with best fit parameters
model = self._model.copy()
statname = self.stat
results = []
for idx, obs in enumerate(self.obs_list):
fit_range = self.true_fit_range[idx]
statval = np.sum(self.statval[idx])
stat_per_bin = self.statval[idx]
npred = copy.deepcopy(self.predicted_counts[idx])
results.append(
SpectrumFitResult(
model=model,
fit_range=fit_range,
statname=statname,
statval=statval,
stat_per_bin=stat_per_bin,
npred=npred,
obs=obs,
)
)
return results
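# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of gammapy). A minimal example of how a
# SpectrumFit is typically driven, assuming a SpectrumObservationList has been
# loaded elsewhere; the PowerLaw import path and the parameter values below are
# assumptions for illustration only.
def _example_spectrum_fit(obs_list):
    import astropy.units as u
    from gammapy.spectrum.models import PowerLaw

    model = PowerLaw(
        index=2.0,
        amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.TeV,
    )
    fit = SpectrumFit(obs_list, model, stat="wstat", forward_folded=True)
    # total_stat is the quantity an optimizer would minimize by varying the
    # model parameters; here we just evaluate it at the initial values.
    return fit.total_stat(model.parameters)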
|
py | b40fe6f1aa65859fb58bf6a249475824beb58faf | # -*- coding: utf-8 -*-
"""
Algebraic connectivity and Fiedler vectors of undirected graphs.
"""
__author__ = """ysitu <[email protected]>"""
# Copyright (C) 2014 ysitu <[email protected]>
# All rights reserved.
# BSD license.
from functools import partial
import networkx as nx
from networkx.utils import not_implemented_for
from networkx.utils import reverse_cuthill_mckee_ordering
from re import compile
try:
from numpy import (array, asmatrix, asarray, dot, matrix, ndarray, ones,
reshape, sqrt, zeros)
from numpy.linalg import norm, qr
from numpy.random import normal
from scipy.linalg import eigh, inv
from scipy.sparse import csc_matrix, spdiags
from scipy.sparse.linalg import eigsh, lobpcg
__all__ = ['algebraic_connectivity', 'fiedler_vector', 'spectral_ordering']
except ImportError:
__all__ = []
try:
from scipy.linalg.blas import dasum, daxpy, ddot
except ImportError:
if __all__:
# Make sure the imports succeeded.
# Use minimal replacements if BLAS is unavailable from SciPy.
dasum = partial(norm, ord=1)
ddot = dot
def daxpy(x, y, a):
y += a * x
return y
_tracemin_method = compile('^tracemin(?:_(.*))?$')
class _PCGSolver(object):
"""Preconditioned conjugate gradient method.
"""
def __init__(self, A, M):
self._A = A
self._M = M or (lambda x: x.copy())
def solve(self, B, tol):
B = asarray(B)
X = ndarray(B.shape, order='F')
for j in range(B.shape[1]):
X[:, j] = self._solve(B[:, j], tol)
return X
def _solve(self, b, tol):
A = self._A
M = self._M
tol *= dasum(b)
# Initialize.
x = zeros(b.shape)
r = b.copy()
z = M(r)
rz = ddot(r, z)
p = z.copy()
# Iterate.
while True:
Ap = A(p)
alpha = rz / ddot(p, Ap)
x = daxpy(p, x, a=alpha)
r = daxpy(Ap, r, a=-alpha)
if dasum(r) < tol:
return x
z = M(r)
beta = ddot(r, z)
beta, rz = beta / rz, beta
p = daxpy(p, z, a=beta)
class _CholeskySolver(object):
"""Cholesky factorization.
"""
def __init__(self, A):
if not self._cholesky:
raise nx.NetworkXError('Cholesky solver unavailable.')
self._chol = self._cholesky(A)
def solve(self, B):
return self._chol(B)
try:
from scikits.sparse.cholmod import cholesky
_cholesky = cholesky
except ImportError:
_cholesky = None
class _LUSolver(object):
"""LU factorization.
"""
def __init__(self, A):
if not self._splu:
raise nx.NetworkXError('LU solver unavailable.')
self._LU = self._splu(A)
def solve(self, B):
B = asarray(B)
X = ndarray(B.shape, order='F')
for j in range(B.shape[1]):
X[:, j] = self._LU.solve(B[:, j])
return X
try:
from scipy.sparse.linalg import splu
_splu = partial(splu, permc_spec='MMD_AT_PLUS_A', diag_pivot_thresh=0.,
options={'Equil': True, 'SymmetricMode': True})
except ImportError:
_splu = None
def _preprocess_graph(G, weight):
"""Compute edge weights and eliminate zero-weight edges.
"""
if G.is_directed():
H = nx.MultiGraph()
H.add_nodes_from(G)
H.add_weighted_edges_from(((u, v, e.get(weight, 1.))
for u, v, e in G.edges(data=True)
if u != v), weight=weight)
G = H
if not G.is_multigraph():
edges = ((u, v, abs(e.get(weight, 1.)))
for u, v, e in G.edges(data=True) if u != v)
else:
edges = ((u, v, sum(abs(e.get(weight, 1.)) for e in G[u][v].values()))
for u, v in G.edges() if u != v)
H = nx.Graph()
H.add_nodes_from(G)
H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)
return H
def _rcm_estimate(G, nodelist):
"""Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.
"""
G = G.subgraph(nodelist)
order = reverse_cuthill_mckee_ordering(G)
n = len(nodelist)
index = dict(zip(nodelist, range(n)))
x = ndarray(n, dtype=float)
for i, u in enumerate(order):
x[index[u]] = i
x -= (n - 1) / 2.
return x
def _tracemin_fiedler(L, X, normalized, tol, method):
"""Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
"""
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = sqrt(L.diagonal())
D = spdiags(1. / e, [0], n, n, format='csr')
L = D * L * D
e *= 1. / norm(e, 2)
if not normalized:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= X[:, j].sum() / n
else:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= dot(X[:, j], e) * e
if method is None:
method = 'pcg'
if method == 'pcg':
# See comments below for the semantics of P and D.
def P(x):
x -= asarray(x * X * X.T)[0, :]
if not normalized:
x -= x.sum() / n
else:
x = daxpy(e, x, a=-ddot(x, e))
return x
solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
elif method == 'chol' or method == 'lu':
# Convert A to CSC to suppress SparseEfficiencyWarning.
A = csc_matrix(L, dtype=float, copy=True)
# Force A to be nonsingular. Since A is the Laplacian matrix of a
# connected graph, its rank deficiency is one, and thus one diagonal
element needs to be modified. Setting it to infinity forces a zero in the
# corresponding element in the solution.
i = (A.indptr[1:] - A.indptr[:-1]).argmax()
A[i, i] = float('inf')
solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
else:
raise nx.NetworkXError('unknown linear system solver.')
# Initialize.
Lnorm = abs(L).sum(axis=1).flatten().max()
project(X)
W = asmatrix(ndarray(X.shape, order='F'))
while True:
# Orthonormalize X.
X = qr(X)[0]
# Compute the iteration matrix H.
W[:, :] = L * X
H = X.T * W
sigma, Y = eigh(H, overwrite_a=True)
# Compute the Ritz vectors.
X *= Y
# Test for convergence exploiting the fact that L * X == W * Y.
res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
if res < tol:
break
# Depending on the linear solver to be used, two mathematically
# equivalent formulations are used.
if method == 'pcg':
# Compute X = X - (P * L * P) \ (P * L * X) where
# P = I - [e X] * [e X]' is a projection onto the orthogonal
# complement of [e X].
W *= Y # L * X == W * Y
W -= (W.T * X * X.T).T
project(W)
# Compute the diagonal of P * L * P as a Jacobi preconditioner.
D = L.diagonal()
D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
D[D < tol * Lnorm] = 1.
D = 1. / D
# Since TraceMIN is globally convergent, the relative residual can
# be loose.
X -= solver.solve(W, 0.1)
else:
# Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
# projection on the nullspace of L, which will be eliminated.
W[:, :] = solver.solve(X)
project(W)
X = (inv(W.T * X) * W.T).T # Preserves Fortran storage order.
return sigma, asarray(X)
def _get_fiedler_func(method):
"""Return a function that solves the Fiedler eigenvalue problem.
"""
match = _tracemin_method.match(method)
if match:
method = match.group(1)
def find_fiedler(L, x, normalized, tol):
q = 2 if method == 'pcg' else min(4, L.shape[0] - 1)
X = asmatrix(normal(size=(q, L.shape[0]))).T
sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
return sigma[0], X[:, 0]
elif method == 'lanczos' or method == 'lobpcg':
def find_fiedler(L, x, normalized, tol):
L = csc_matrix(L, dtype=float)
n = L.shape[0]
if normalized:
D = spdiags(1. / sqrt(L.diagonal()), [0], n, n, format='csc')
L = D * L * D
if method == 'lanczos' or n < 10:
# Avoid LOBPCG when n < 10 due to
# https://github.com/scipy/scipy/issues/3592
# https://github.com/scipy/scipy/pull/3594
sigma, X = eigsh(L, 2, which='SM', tol=tol,
return_eigenvectors=True)
return sigma[1], X[:, 1]
else:
X = asarray(asmatrix(x).T)
M = spdiags(1. / L.diagonal(), [0], n, n)
Y = ones(n)
if normalized:
Y /= D.diagonal()
sigma, X = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol,
maxiter=n, largest=False)
return sigma[0], X[:, 0]
else:
raise nx.NetworkXError("unknown method '%s'." % method)
return find_fiedler
@not_implemented_for('directed')
def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Return the algebraic connectivity of an undirected graph.
The algebraic connectivity of a connected undirected graph is the second
smallest eigenvalue of its Laplacian matrix.
Parameters
----------
G : NetworkX graph
An undirected graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
algebraic_connectivity : float
Algebraic connectivity.
Raises
------
NetworkXNotImplemented
If G is directed.
NetworkXError
If G has less than two nodes.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraphs,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) < 2:
raise nx.NetworkXError('graph has less than two nodes.')
G = _preprocess_graph(G, weight)
if not nx.is_connected(G):
return 0.
L = nx.laplacian_matrix(G)
if L.shape[0] == 2:
return 2. * L[0, 0] if not normalized else 2.
find_fiedler = _get_fiedler_func(method)
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
return find_fiedler(L, x, normalized, tol)[0]
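# Illustrative example (not part of the NetworkX API, never called at import):
# for the path graph P4 the Laplacian eigenvalues are 2 - 2*cos(pi*k/4), so the
# algebraic connectivity should come out as 2 - sqrt(2) ~= 0.586.
def _example_algebraic_connectivity():
    G = nx.path_graph(4)
    ac = algebraic_connectivity(G)
    assert abs(ac - (2. - 2. ** 0.5)) < 1e-5
    return ac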
@not_implemented_for('directed')
def fiedler_vector(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Return the Fiedler vector of a connected undirected graph.
The Fiedler vector of a connected undirected graph is the eigenvector
corresponding to the second smallest eigenvalue of the Laplacian matrix of
the graph.
Parameters
----------
G : NetworkX graph
An undirected graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
fiedler_vector : NumPy array of floats.
Fiedler vector.
Raises
------
NetworkXNotImplemented
If G is directed.
NetworkXError
If G has less than two nodes or is not connected.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraphs,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) < 2:
raise nx.NetworkXError('graph has less than two nodes.')
G = _preprocess_graph(G, weight)
if not nx.is_connected(G):
raise nx.NetworkXError('graph is not connected.')
if len(G) == 2:
return array([1., -1.])
find_fiedler = _get_fiedler_func(method)
L = nx.laplacian_matrix(G)
x = None if method != 'lobpcg' else _rcm_estimate(G, G)
return find_fiedler(L, x, normalized, tol)[1]
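# Illustrative example (not part of the NetworkX API, never called at import):
# the signs of the Fiedler vector give a natural 2-way partition. For two
# triangles joined by a single edge the split separates the triangles
# (which side ends up "positive" depends on the arbitrary sign of the eigenvector).
def _example_fiedler_partition():
    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)])
    v = fiedler_vector(G)
    positive = {n for n, x in zip(G, v) if x >= 0}
    return positive, set(G) - positive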
def spectral_ordering(G, weight='weight', normalized=False, tol=1e-8,
method='tracemin'):
"""Compute the spectral_ordering of a graph.
The spectral ordering of a graph is an ordering of its nodes where nodes
in the same weakly connected component appear contiguously, ordered by
their corresponding elements in the Fiedler vector of the component.
Parameters
----------
G : NetworkX graph
A graph.
weight : object, optional
The data key used to determine the weight of each edge. If None, then
each edge has unit weight. Default value: 'weight'.
normalized : bool, optional
Whether the normalized Laplacian matrix is used. Default value: False.
tol : float, optional
Tolerance of relative residual in eigenvalue computation. Default
value: 1e-8.
method : string, optional
Method of eigenvalue computation. It should be one of 'tracemin'
(TraceMIN), 'lanczos' (Lanczos iteration) and 'lobpcg' (LOBPCG).
Default value: 'tracemin'.
The TraceMIN algorithm uses a linear system solver. The following
values allow specifying the solver to be used.
=============== ========================================
Value Solver
=============== ========================================
'tracemin_pcg' Preconditioned conjugate gradient method
'tracemin_chol' Cholesky factorization
'tracemin_lu' LU factorization
=============== ========================================
Returns
-------
spectral_ordering : NumPy array of floats.
Spectral ordering of nodes.
Raises
------
NetworkXError
If G is empty.
Notes
-----
Edge weights are interpreted by their absolute values. For MultiGraphs,
weights of parallel edges are summed. Zero-weighted edges are ignored.
To use Cholesky factorization in the TraceMIN algorithm, the
:samp:`scikits.sparse` package must be installed.
See Also
--------
laplacian_matrix
"""
if len(G) == 0:
raise nx.NetworkXError('graph is empty.')
G = _preprocess_graph(G, weight)
find_fiedler = _get_fiedler_func(method)
order = []
for component in nx.connected_components(G):
size = len(component)
if size > 2:
L = nx.laplacian_matrix(G, component)
x = None if method != 'lobpcg' else _rcm_estimate(G, component)
fiedler = find_fiedler(L, x, normalized, tol)[1]
order.extend(
u for x, c, u in sorted(zip(fiedler, range(size), component)))
else:
order.extend(component)
return order
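# Illustrative example (not part of the NetworkX API, never called at import):
# on a path graph the Fiedler vector is monotone along the path, so the spectral
# ordering recovers the path itself, possibly reversed.
def _example_spectral_ordering():
    G = nx.path_graph(5)
    order = spectral_ordering(G)
    assert order in ([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
    return order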
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
|
py | b40fe77a92dc55241779c38ef9ee3c5835ffebee | # coding: utf-8
a_list = [3, 4, -2, -30, 14, 9.3, 3.4]
# Sort the list elements
a_list.sort()
print(a_list) # [-30, -2, 3, 3.4, 4, 9.3, 14]
b_list = ['Python', 'Swift', 'Ruby', 'Go', 'Kotlin', 'Erlang']
# Sort the list elements: by default strings are compared by their character codes
b_list.sort()
print(b_list) # ['Erlang', 'Go', 'Kotlin', 'Python', 'Ruby', 'Swift']
# Pass key=len so the len function generates the comparison key for each element,
# i.e. strings are compared by their length
b_list.sort(key=len)
print(b_list) # ['Go', 'Ruby', 'Swift', 'Erlang', 'Kotlin', 'Python']
# Sort in reverse order
b_list.sort(key=len, reverse=True)
print(b_list) # ['Erlang', 'Kotlin', 'Python', 'Swift', 'Ruby', 'Go']
# The following code only runs on Python 2.x (a Python 3 equivalent follows below)
# Define a comparison function that compares strings by length
def len_cmp(x, y):
# Comparison logic: the longer string is considered greater
return 1 if len(x) > len(y) else (-1 if len(x) < len(y) else 0)
b_list.sort(len_cmp)
print(b_list) # ['Go', 'Ruby', 'Swift', 'Erlang', 'Kotlin', 'Python']
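# On Python 3.x list.sort() no longer accepts a bare comparison function, so the
# call above raises TypeError there. The equivalent (assuming Python 3) wraps
# len_cmp with functools.cmp_to_key:
from functools import cmp_to_key
b_list.sort(key=cmp_to_key(len_cmp))
print(b_list)  # ['Go', 'Ruby', 'Swift', 'Erlang', 'Kotlin', 'Python']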
|
py | b40fe7c724478e1e522f3725fd0d12eae00a466f | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
from chalice import Response
from chalicelib import common, template
from chalicelib.common import app
stage = os.environ.get('STAGE', 'dev')
if stage == 'local':
# Only used when running locally
from chalicelib import staticfiles # noqa
def html_render(template_path, **params):
''' Return an HTML rendering response '''
tpl = template.get(template_path)
return Response(
status_code=200,
headers={'Content-Type': 'text/html'},
body=tpl.render(**params))
@app.route('/')
def index():
''' Return the top page '''
req = json.dumps(app.current_request.to_dict(), indent=4)
return html_render('index.tpl', req=req)
def _search(database, keyword):
''' Core search implementation '''
if keyword:
# Keyword given
return [e for e in database if keyword in e]
else:
# No keyword given
return database
@app.route('/search', methods=['POST'],
content_types=common.post_content_types)
def search():
''' Run a search '''
# database is sample data
database = ['C', 'C++', 'Java', 'Perl', 'PHP', 'Ruby', 'Python']
params = common.post_params()
results = _search(database, params.get('keyword'))
return html_render('search.tpl', results=results)
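# Illustrative sketch (hypothetical helper, not part of the app): _search is a
# plain function, so its filtering behaviour can be checked without running
# Chalice. The sample database mirrors the one used in search() above.
def _example_search():
    database = ['C', 'C++', 'Java', 'Perl', 'PHP', 'Ruby', 'Python']
    assert _search(database, 'Py') == ['Python']
    assert _search(database, None) == database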
|
py | b40fe89dc0f0d599e3f7558af830ce29754fa920 | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StackDescriptor(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'version': 'str',
'min_ambari': 'str',
'repo': 'StackRepoDetailsJson',
'mpacks': 'dict(str, list[ManagementPackEntry])',
'ambari': 'AmbariInfoJson'
}
attribute_map = {
'version': 'version',
'min_ambari': 'minAmbari',
'repo': 'repo',
'mpacks': 'mpacks',
'ambari': 'ambari'
}
def __init__(self, version=None, min_ambari=None, repo=None, mpacks=None, ambari=None):
"""
StackDescriptor - a model defined in Swagger
"""
self._version = None
self._min_ambari = None
self._repo = None
self._mpacks = None
self._ambari = None
if version is not None:
self.version = version
if min_ambari is not None:
self.min_ambari = min_ambari
if repo is not None:
self.repo = repo
if mpacks is not None:
self.mpacks = mpacks
if ambari is not None:
self.ambari = ambari
@property
def version(self):
"""
Gets the version of this StackDescriptor.
:return: The version of this StackDescriptor.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this StackDescriptor.
:param version: The version of this StackDescriptor.
:type: str
"""
self._version = version
@property
def min_ambari(self):
"""
Gets the min_ambari of this StackDescriptor.
:return: The min_ambari of this StackDescriptor.
:rtype: str
"""
return self._min_ambari
@min_ambari.setter
def min_ambari(self, min_ambari):
"""
Sets the min_ambari of this StackDescriptor.
:param min_ambari: The min_ambari of this StackDescriptor.
:type: str
"""
self._min_ambari = min_ambari
@property
def repo(self):
"""
Gets the repo of this StackDescriptor.
:return: The repo of this StackDescriptor.
:rtype: StackRepoDetailsJson
"""
return self._repo
@repo.setter
def repo(self, repo):
"""
Sets the repo of this StackDescriptor.
:param repo: The repo of this StackDescriptor.
:type: StackRepoDetailsJson
"""
self._repo = repo
@property
def mpacks(self):
"""
Gets the mpacks of this StackDescriptor.
:return: The mpacks of this StackDescriptor.
:rtype: dict(str, list[ManagementPackEntry])
"""
return self._mpacks
@mpacks.setter
def mpacks(self, mpacks):
"""
Sets the mpacks of this StackDescriptor.
:param mpacks: The mpacks of this StackDescriptor.
:type: dict(str, list[ManagementPackEntry])
"""
self._mpacks = mpacks
@property
def ambari(self):
"""
Gets the ambari of this StackDescriptor.
:return: The ambari of this StackDescriptor.
:rtype: AmbariInfoJson
"""
return self._ambari
@ambari.setter
def ambari(self, ambari):
"""
Sets the ambari of this StackDescriptor.
:param ambari: The ambari of this StackDescriptor.
:type: AmbariInfoJson
"""
self._ambari = ambari
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StackDescriptor):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
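# Illustrative sketch (not generated by swagger-codegen): StackDescriptor is a
# plain data holder, and to_dict() serialises it recursively. The values below
# are made-up placeholders.
def _example_stack_descriptor():
    desc = StackDescriptor(version='3.1', min_ambari='2.7')
    # Unset attributes stay None in the resulting dict.
    return desc.to_dict()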
|
py | b40fea044569df3ea74da670e3c3f51986c843b9 | """
Parse GenBank files. Biopython provides the core parsing functionality, but is not capable of producing a
hierarchical model. This module builds that hierarchy by relying on the ordering of the GenBank file.
There are two conventions for inferring hierarchy in GenBank files, and neither is always followed.
The first (Model 1A) is sort order: so that it always goes
gene -> {mRNA, tRNA, rRNA} -> CDS (for coding genes only)
Each transcript feature can repeat. Each mRNA feature must be followed by a CDS feature.
The presence of a new gene feature is the divider between genes.
In some genomes (often Prokaryotic), there is no transcript level feature for coding genes.
That is, it goes from gene -> CDS. This is Model 1B.
The second way that a GenBank file can be grouped is via the locus_tag qualifiers. This method is the
default for this parsing module. This does not work for genomes with alternative isoforms.
The generic parsing function that interprets the BioPython results to BioCantor data models is implemented in
:meth:`GeneFeature.to_gene_model()`. This function can be over-ridden to provide custom parsing implementations.
"""
import itertools
import pathlib
import warnings
from abc import ABC
from collections import Counter
from copy import deepcopy
from typing import Optional, TextIO, Iterator, List, Dict, Callable, Tuple, Any, Union
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature
from Bio.SeqRecord import SeqRecord
from inscripta.biocantor.gene import Biotype, CDSInterval, CDSFrame
from inscripta.biocantor.io.exc import StrandViolationWarning
from inscripta.biocantor.io.features import extract_feature_types, extract_feature_name_id, merge_qualifiers
from inscripta.biocantor.io.genbank.constants import (
GeneFeatures,
TranscriptFeatures,
GeneIntervalFeatures,
MetadataFeatures,
GenBankParserType,
KnownQualifiers,
GENBANK_GENE_FEATURES,
)
from inscripta.biocantor.io.exc import DuplicateSequenceException
from inscripta.biocantor.io.genbank.exc import (
GenBankParserError,
EmptyGenBankError,
GenBankLocusTagError,
GenBankLocationException,
GenBankNullStrandException,
)
from inscripta.biocantor.io.models import (
GeneIntervalModel,
AnnotationCollectionModel,
FeatureIntervalCollectionModel,
)
from inscripta.biocantor.io.parser import ParsedAnnotationRecord
from inscripta.biocantor.location import (
Location,
Strand,
SingleInterval,
CompoundInterval,
EmptyLocation,
)
class Feature(ABC):
"""Generic feature."""
types = set()
def __init__(self, feature: SeqFeature, record: SeqRecord):
if feature.type not in self.types:
raise GenBankParserError(f"Invalid feature type {feature.type}")
if feature.location is None:
raise GenBankLocationException(f"Feature {feature} did not have parseable coordinates.")
if not feature.strand:
raise GenBankNullStrandException(f"Feature {feature} is unstranded or has multiple strands.")
self.feature = feature
self.record = record
self.children = []
def __iter__(self) -> Iterator[SeqFeature]:
"""Pre-order depth first traversal of this feature."""
yield self
for child in self.children:
yield from child
def __repr__(self):
return "\n".join((str(x) for x in self))
@property
def type(self) -> str:
return str(self.feature.type)
@property
def has_children(self) -> bool:
return len(self.children) > 0
@property
def strand(self) -> int:
return self.feature.strand
class FeatureIntervalGenBankCollection:
"""A collection of generic (non-transcribed) feature intervals."""
def __init__(self, features: List[SeqFeature], record: SeqRecord):
"""
Build a generic feature from a grouping of features found in a genbank parsing event.
Args:
features: One or more ``SeqFeature``s found in a GenBank file that are associated together, but which
could not be interpreted as a gene.
record: The ``SeqRecord`` these features were found on.
"""
for feature in features:
if feature.location is None:
raise GenBankLocationException(f"Feature {feature} did not have parseable coordinates.")
self.types = {feature.type for feature in features}
self.record = record
self.features = features
@staticmethod
def to_feature_model(cls: "FeatureIntervalGenBankCollection") -> Dict[str, Any]:
"""Convert to a Dict representation of a :class:`biocantor.gene.collections.FeatureIntervalCollection`
that can be used for analyses.
This is the default function, that can be over-ridden by specific implementations.
Looks for identifiers in the hierarchy defined by the Enum
:class:`biocantor.io.genbank.constants.FeatureIntervalIdentifierKeys`.
The feature collection produced will be named either the locus tag if provided, and otherwise by definition of
the parser we have only one feature, so the first name is chosen.
"""
features = []
feature_names = Counter()
feature_ids = Counter()
locus_tag = None
merged_qualifiers = {}
for feature in cls.features:
interval_starts = []
interval_ends = []
for loc in sorted(feature.location.parts, key=lambda p: p.start):
interval_starts.append(loc.nofuzzy_start)
interval_ends.append(loc.nofuzzy_end)
strand = Strand.from_int(feature.location.strand)
# extract feature types, including the base type
feature_types = {feature.type}
extract_feature_types(feature_types, feature.qualifiers)
# extract primary identifier
feature_name, feature_id = extract_feature_name_id(feature.qualifiers)
# keep track of feature names seen to identify consensus feature name for collection
if feature_name:
feature_names[feature_name] += 1
if feature_id:
feature_ids[feature_id] += 1
# try to find a locus tag
if KnownQualifiers.LOCUS_TAG.value in feature.qualifiers:
locus_tag = feature.qualifiers[KnownQualifiers.LOCUS_TAG.value][0]
features.append(
dict(
interval_starts=interval_starts,
interval_ends=interval_ends,
strand=strand.name,
qualifiers=feature.qualifiers,
feature_id=feature_id,
feature_name=feature_name,
feature_types=sorted(feature_types),
sequence_name=cls.record.id,
is_primary_feature=False,
)
)
merged_qualifiers = merge_qualifiers(merged_qualifiers, feature.qualifiers)
if len(feature_names) > 0:
feature_name = feature_names.most_common(1)[0][0]
else:
feature_name = locus_tag
if len(feature_ids) > 0:
feature_id = feature_ids.most_common(1)[0][0]
else:
feature_id = locus_tag
feature_collection = FeatureIntervalCollectionModel.Schema().load(
dict(
feature_intervals=features,
feature_collection_name=feature_name,
feature_collection_id=feature_id,
locus_tag=locus_tag,
qualifiers=merged_qualifiers,
sequence_name=cls.record.id,
)
)
# construct a FeatureIntervalCollection to run validations
feature_collection = feature_collection.to_feature_collection()
return feature_collection.to_dict()
class GeneFeature(Feature):
"""A gene."""
types = {x.value for x in GeneFeatures}
def __str__(self):
return self.feature.__repr__()
@staticmethod
def from_transcript_or_cds_feature(feature: SeqFeature, seqrecord: SeqRecord) -> "GeneFeature":
"""Some GenBank files lack a gene-level feature, but have transcript-level features or CDS-level features only.
Construct a GeneFeature from such records."""
old_type = feature.type
feature.type = GeneFeatures.GENE.value
gene = GeneFeature(feature, seqrecord)
feature.type = old_type
gene.add_child(feature)
return gene
def add_child(self, feature: SeqFeature):
"""Add a new feature as a child. Infer Transcripts if this child is a CDS or exon feature."""
if feature.type in TranscriptFeature.types:
self.children.append(TranscriptFeature(feature, self.record))
elif feature.type in IntervalFeature.types:
# infer a transcript
tx_feature = deepcopy(feature)
if tx_feature.type == GeneIntervalFeatures.CDS.value:
tx_feature.type = TranscriptFeatures.CODING_TRANSCRIPT.value
# this means we have an exon as a direct child of a gene
elif tx_feature.type == GeneIntervalFeatures.EXON.value:
tx_feature.type = TranscriptFeatures.CODING_TRANSCRIPT.value
else:
tx_feature.type = TranscriptFeatures[tx_feature.type].value
tx = TranscriptFeature(tx_feature, self.record)
self.children.append(tx)
tx.add_child(feature)
else:
raise GenBankParserError(f"Invalid feature type {feature.type}")
def finalize(self):
"""Make sure we have a full hierarchy; infer children if necessary.
This is often needed for non-coding genes which lack an explicit exon.
"""
for transcript in self.children:
transcript.infer_exon_features()
@staticmethod
def to_gene_model(cls: "GeneFeature") -> Dict[str, Any]:
"""Convert to a Dict representation of a :class:`biocantor.gene.collections.GeneInterval`
that can be used for analyses.
This is the default function, that can be over-ridden by specific implementations.
Looks for /transcript_id, /protein_id, and /gene on the transcript level, and
looks for /gene_id, /gene, and /locus_tag on the gene level.
"""
transcripts = []
tx_biotypes = Counter()
for tx in cls.children:
exon_starts = []
exon_ends = []
for start, end in tx.iterate_intervals():
exon_starts.append(start)
exon_ends.append(end)
strand = Strand.from_int(tx.strand)
exon_interval = CompoundInterval(exon_starts, exon_ends, strand)
cds_interval = tx.find_cds_interval(exon_interval)
if cds_interval.is_empty:
cds_starts = None
cds_ends = None
cds_frames = []
else:
cds_starts = []
cds_ends = []
for block in cds_interval.blocks:
cds_starts.append(block.start)
cds_ends.append(block.end)
cds_frames = tx.construct_frames(cds_interval)
if "pseudo" in tx.feature.qualifiers:
transcript_biotype = Biotype.pseudogene
elif tx.feature.type == TranscriptFeatures.CODING_TRANSCRIPT.value:
transcript_biotype = Biotype.protein_coding
else:
transcript_biotype = Biotype[tx.feature.type]
tx_biotypes[transcript_biotype] += 1
tx_model = dict(
exon_starts=exon_starts,
exon_ends=exon_ends,
strand=strand.name,
cds_starts=cds_starts,
cds_ends=cds_ends,
cds_frames=cds_frames,
qualifiers=tx.merge_cds_qualifiers_to_transcript(),
is_primary_tx=False,
transcript_id=tx.get_qualifier_from_tx_or_cds_features(KnownQualifiers.TRANSCRIPT_ID.value),
protein_id=tx.get_qualifier_from_tx_or_cds_features(KnownQualifiers.PROTEIN_ID.value),
product=tx.get_qualifier_from_tx_or_cds_features(KnownQualifiers.PRODUCT.value),
transcript_symbol=tx.get_qualifier_from_tx_or_cds_features(KnownQualifiers.GENE.value),
transcript_type=transcript_biotype.name,
sequence_name=tx.record.id,
)
transcripts.append(tx_model)
# pick most common transcript type; hacky
gene_biotype = tx_biotypes.most_common(1)[0][0]
gene = GeneIntervalModel.Schema().load(
dict(
transcripts=transcripts,
gene_id=cls.feature.qualifiers.get(KnownQualifiers.GENE_ID.value, [None])[0],
gene_symbol=cls.feature.qualifiers.get(KnownQualifiers.GENE.value, [None])[0],
locus_tag=cls.feature.qualifiers.get(KnownQualifiers.LOCUS_TAG.value, [None])[0],
gene_type=gene_biotype.name,
qualifiers=cls.feature.qualifiers,
sequence_name=cls.record.id,
)
)
# construct a GeneInterval to run validations
gene = gene.to_gene_interval()
return gene.to_dict()
class TranscriptFeature(Feature):
"""A transcript"""
types = {x.value for x in TranscriptFeatures}
def __str__(self):
return f"--> {self.feature.__repr__()}"
def add_child(self, feature: SeqFeature):
self.children.append(IntervalFeature(feature, self.record))
def infer_exon_features(self):
"""Commonly, non-coding genes lack IntervalFeatures, coding genes only have CDS features"""
# no children means this is likely a non-coding transcript with no exon features
# no exon features means this is likely a coding transcript with no exon features
if len(self.children) == 0 or len(list(self.exon_features)) == 0:
# add an exon with the same interval as the transcript
feature = deepcopy(self.feature)
feature.type = GeneIntervalFeatures.EXON.value
self.add_child(feature)
def construct_frames(self, cds_interval: Location) -> List[str]:
"""We need to build frames. Since GenBank lacks this info, do our best"""
# make 0 based offset, if possible, otherwise assume always in frame
frame = int(self.children[0].feature.qualifiers.get(KnownQualifiers.CODON_START.value, [1])[0]) - 1
frame = CDSFrame.from_int(frame)
frames = CDSInterval.construct_frames_from_location(cds_interval, frame)
return [x.name for x in frames]
@property
def exon_features(self) -> Iterator[SeqFeature]:
for f in self.children:
if f.type == GeneIntervalFeatures.EXON.value:
yield f
@property
def cds_features(self) -> Iterator[SeqFeature]:
for f in self.children:
if f.type == GeneIntervalFeatures.CDS.value:
yield f
def get_qualifier_from_tx_or_cds_features(self, qualifier: str) -> Optional[str]:
"""Get a specific qualifier, if it exists. Look at tx first, then children"""
if qualifier in self.feature.qualifiers:
return self.feature.qualifiers[qualifier][0]
for feature in self.cds_features:
if qualifier in feature.feature.qualifiers:
return feature.feature.qualifiers[qualifier][0]
def iterate_intervals(self) -> Iterator[Tuple[int, int]]:
"""Iterate over the location parts"""
for exon in sorted(self.exon_features, key=lambda e: e.feature.location.nofuzzy_start):
for part in sorted(exon.feature.location.parts, key=lambda p: p.start):
yield int(part.start), int(part.end)
def find_cds_interval(self, exon_interval: Location) -> Location:
cds = sorted(self.cds_features, key=lambda c: c.feature.location.nofuzzy_start)
if len(cds) == 0:
return EmptyLocation()
cds_i = SingleInterval(
cds[0].feature.location.nofuzzy_start,
cds[-1].feature.location.nofuzzy_end,
Strand.from_int(self.strand),
)
return exon_interval.intersection(cds_i)
def merge_cds_qualifiers_to_transcript(self) -> Dict[str, List[str]]:
"""
If there were distinct transcript-level features, the qualifiers on the CDS feature will be lost
when converting to the BioCantor data model unless those qualifiers are rolled into the qualifiers on
the transcript feature.
"""
qualifiers = {key: set(vals) for key, vals in self.feature.qualifiers.items()}
for cds_feature in self.cds_features:
for key, vals in cds_feature.feature.qualifiers.items():
if key not in qualifiers:
qualifiers[key] = set(vals)
else:
qualifiers[key].update(vals)
return {key: list(vals) for key, vals in qualifiers.items()}
class IntervalFeature(Feature):
"""A set of intervals"""
types = {"CDS", "exon"}
def __str__(self):
return f"----> {self.feature.__repr__()}"
def _construct_gene_from_feature(
feature: SeqFeature,
seqrecord: SeqRecord,
cls_or_fn: Callable[[SeqFeature, SeqRecord], GeneFeature],
) -> Optional[GeneFeature]:
"""
Convenience function for producing :class:`GeneFeature` from a `SeqFeature`, handling both
possible constructor routes (construction from a ``gene`` feature as found in the GenBank,
or inference from a transcript/interval level feature in case no ``gene`` level feature was found).
This wrapper function catches exceptions raised for common errors, and converts them to warnings
as appropriate.
"""
try:
return cls_or_fn(feature, seqrecord)
except GenBankNullStrandException:
warnings.warn(
StrandViolationWarning(f"Found multiple strands for feature {feature}. This feature will be skipped.")
)
def _construct_feature_collection_from_features(
features: List[SeqFeature],
seqrecord: SeqRecord,
) -> Optional[FeatureIntervalGenBankCollection]:
"""
Convenience function for producing :class:`FeatureIntervalGenBankCollection` from a `SeqFeature`.
This wrapper function catches exceptions raised for common errors, and converts them to warnings
as appropriate.
"""
try:
return FeatureIntervalGenBankCollection(features, seqrecord)
except GenBankNullStrandException:
warnings.warn(
StrandViolationWarning(
f"Found multiple strands for feature group {features}. " f"This feature collection will be skipped."
)
)
def parse_genbank(
genbank_handle_or_path: Union[TextIO, str, pathlib.Path],
parse_func: Optional[Callable[[GeneFeature], Dict[str, Any]]] = GeneFeature.to_gene_model,
feature_parse_func: Optional[
Callable[[FeatureIntervalGenBankCollection], Dict[str, Any]]
] = FeatureIntervalGenBankCollection.to_feature_model,
gbk_type: Optional[GenBankParserType] = GenBankParserType.LOCUS_TAG,
) -> Iterator[ParsedAnnotationRecord]:
"""This is the main GenBank parsing function. The parse function implemented in :class:`GeneFeature` can be
over-ridden to provide a custom implementation.
Args:
genbank_handle_or_path: An open GenBank file or a path to a locally stored GenBank file.
parse_func: Optional parse function implementation.
feature_parse_func: Optional feature interval parse function implementation.
gbk_type: Do we want to use model 1 or model 2? Must be one of ``sorted``, ``locus_tag`` or ``hybrid``.
Yields:
:class:`ParsedAnnotationRecord`.
"""
seq_records = list(SeqIO.parse(genbank_handle_or_path, format="genbank"))
seqrecords_dict = {}
for rec in seq_records:
if rec.id in seqrecords_dict:
raise DuplicateSequenceException(f"Sequence {rec.id} found twice in GenBank file.")
seqrecords_dict[rec.id] = rec
if gbk_type == GenBankParserType.SORTED:
gene_records = group_gene_records_from_sorted_genbank(seq_records, parse_func, feature_parse_func)
else:
gene_records = group_gene_records_by_locus_tag(seq_records, parse_func, feature_parse_func, gbk_type)
yield from gene_records
def group_gene_records_from_sorted_genbank(
record_iter: Iterator[SeqRecord],
parse_func: Callable[[GeneFeature], Dict[str, Any]],
feature_parse_func: Callable[[FeatureIntervalGenBankCollection], Dict[str, Any]],
) -> Iterator[ParsedAnnotationRecord]:
"""Model 1: position sorted GenBank.
This function looks for canonical gene records:
gene -> Optional(mRNA) -> CDS records
It also looks for canonical non-coding records:
gene -> {misc_RNA,tRNA,rRNA,etc)
It also will infer non-canonical record types, including non-coding transcripts and coding genes
from isolated CDS/non-coding features (those without a gene feature before them in the sort order).
Any features that do not fit the above bins are interpreted as generic features.
Some GenBank files are improperly ordered, and will have things like the CDS feature first, or the mRNA feature
first. To try and capture this, the full set of records are sorted first by position, then in the order:
gene
mRNA
CDS
exon
anything else
Args:
record_iter: Iterator of SeqRecord objects.
parse_func: Optional parse function implementation.
feature_parse_func: Optional feature interval parse function implementation.
Yields:
:class:`ParsedAnnotationRecord`.
"""
tot_genes = 0
tot_features = 0
for seqrecord in record_iter:
gene = None
source = None
genes = []
# capture non-gene intervals downstream
feature_features = []
# sort features to try to capture weirdly ordered genbank files
sorted_features = sorted(
seqrecord.features,
key=lambda x: (
x.location.nofuzzy_start,
x.type != GeneFeatures.GENE.value,
x.type != TranscriptFeatures.CODING_TRANSCRIPT.value,
x.type != GeneIntervalFeatures.CDS.value,
x.type != GeneIntervalFeatures.EXON.value,
),
)
for feature in sorted_features:
# try to capture the Source field, if it exists
if feature.type == MetadataFeatures.SOURCE.value:
source = feature
# base case for start; iterate until we find a gene
elif gene is None:
if feature.type in GeneFeature.types:
gene = _construct_gene_from_feature(feature, seqrecord, GeneFeature)
# gene is None if it was not parseable
if not gene:
continue
# base case for starting with an isolated ncRNA or CDS feature; immediately add them
# and reset the gene to None
elif feature.type in TranscriptFeature.types or feature.type in IntervalFeature.types:
gene = _construct_gene_from_feature(feature, seqrecord, GeneFeature.from_transcript_or_cds_feature)
# gene is None if it was not parseable
if gene:
gene.finalize()
gene = parse_func(gene)
genes.append(gene)
gene = None
# this must be a generic feature
else:
feature_features.append(feature)
# next gene; re-set the gene object and report out the collection
elif feature.type in GeneFeature.types:
if gene.has_children:
gene.finalize()
gene = parse_func(gene)
genes.append(gene)
gene = _construct_gene_from_feature(feature, seqrecord, GeneFeature)
if not gene:
continue
elif feature.type in TranscriptFeature.types:
# if the current gene is non-empty, and the feature is not an mRNA, then this is an isolated ncRNA
# finish this gene and start a new one
if feature.type != TranscriptFeatures.CODING_TRANSCRIPT and gene.has_children:
gene.finalize()
gene = parse_func(gene)
genes.append(gene)
gene = _construct_gene_from_feature(feature, seqrecord, GeneFeature.from_transcript_or_cds_feature)
# gene is None if it was not parseable
if not gene:
continue
else:
gene.add_child(feature)
elif feature.type in IntervalFeature.types:
if not gene.has_children:
gene.add_child(feature)
else:
gene.children[-1].add_child(feature)
else:
feature_features.append(feature)
# gene could be None if this record has no annotations
if gene is not None and gene.has_children:
gene.finalize()
gene = parse_func(gene)
genes.append(gene)
if source is not None:
source_qualifiers = source.qualifiers
else:
source_qualifiers = None
feature_collections = _extract_generic_features(seqrecord, feature_features, feature_parse_func)
tot_features += len(feature_collections) if feature_collections else 0
tot_genes += len(genes) if genes else 0
annotation = AnnotationCollectionModel.Schema().load(
dict(
genes=genes,
feature_collections=feature_collections,
sequence_name=seqrecord.id,
start=0,
end=len(seqrecord),
qualifiers=source_qualifiers,
)
)
yield ParsedAnnotationRecord(annotation=annotation, seqrecord=seqrecord)
if tot_genes + tot_features == 0:
raise EmptyGenBankError("GenBank parsing produced zero genes and zero features.")
def group_gene_records_by_locus_tag(
record_iter: Iterator[SeqRecord],
parse_func: Callable[[GeneFeature], Dict[str, Any]],
feature_parse_func: Callable[[FeatureIntervalGenBankCollection], Dict[str, Any]],
genbank_parser_type: GenBankParserType = GenBankParserType.LOCUS_TAG,
) -> Iterator[ParsedAnnotationRecord]:
"""Model 2: ``locus_tag`` defined GenBank.
All feature types that qualify within the hierarchical structure, possess a locus_tag, and whose feature type
are valid for a known transcribed interval type, will be included in the gene parsing.
All other feature types will become generic features (FeatureIntervals), unless we are in hybrid mode.
In hybrid mode, locus_tag is used first, then all of the remaining features are sent to the
sorted parser.
Args:
record_iter: Iterator of SeqRecord objects.
parse_func: Optional parse function implementation.
feature_parse_func: Optional feature interval parse function implementation.
genbank_parser_type: Optional parser type. Changing this to GenBankParserType.HYBRID
will enable hybrid parsing mode.
Yields:
:class:`ParsedAnnotationRecord`.
"""
if genbank_parser_type not in [GenBankParserType.LOCUS_TAG, GenBankParserType.HYBRID]:
raise GenBankParserError("Must use either locus_tag or hybrid")
tot_genes = 0
tot_features = 0
for seqrecord in record_iter:
gene_filtered_features = []
remaining_features = []
source = None
for f in seqrecord.features:
if f.type in GENBANK_GENE_FEATURES and KnownQualifiers.LOCUS_TAG.value in f.qualifiers:
gene_filtered_features.append(f)
elif f.type == MetadataFeatures.SOURCE.value:
source = f
else:
remaining_features.append(f)
sorted_gene_filtered_features = sorted(
gene_filtered_features, key=lambda f: f.qualifiers[KnownQualifiers.LOCUS_TAG.value]
)
genes = []
for locus_tag, gene_features in itertools.groupby(
sorted_gene_filtered_features, key=lambda f: f.qualifiers[KnownQualifiers.LOCUS_TAG.value][0]
):
# sort the features for this locus tag to bubble the "gene" feature to the top, if it exists
gene_features = sorted(gene_features, key=lambda f: f.type != GeneFeatures.GENE.value)
# do we have more than one gene with this locus_tag?
if len(gene_features) > 1 and gene_features[1].type == GeneFeatures.GENE.value:
raise GenBankLocusTagError(
f"Grouping by locus tag found multiple gene features with the same locus tag:"
f"\n{gene_features[0]}\n{gene_features[1]}"
)
gene_feature = gene_features[0]
if gene_feature.type == GeneFeatures.GENE.value:
gene = _construct_gene_from_feature(gene_feature, seqrecord, GeneFeature)
else:
gene = _construct_gene_from_feature(gene_feature, seqrecord, GeneFeature.from_transcript_or_cds_feature)
# gene is None if it was not parseable
if not gene:
continue
for feature in gene_features[1:]:
if feature.type in TranscriptFeature.types:
gene.add_child(feature)
elif feature.type in IntervalFeature.types:
if len(gene.children) == 0:
gene.add_child(feature)
else:
gene.children[-1].add_child(feature)
if gene.has_children:
gene.finalize()
gene = parse_func(gene)
genes.append(gene)
if source is not None:
source_qualifiers = source.qualifiers
else:
source_qualifiers = None
if genbank_parser_type == GenBankParserType.LOCUS_TAG:
feature_collections = _extract_generic_features(seqrecord, remaining_features, feature_parse_func)
else:
# hybrid parsing mode
tmp_seqrecord = deepcopy(seqrecord)
tmp_seqrecord.features = remaining_features
tmp_annotation = next(
group_gene_records_from_sorted_genbank((tmp_seqrecord,), parse_func, feature_parse_func)
)
if tmp_annotation.annotation.feature_collections:
feature_collections = [
FeatureIntervalCollectionModel.Schema().dump(x)
for x in tmp_annotation.annotation.feature_collections
]
else:
feature_collections = None
if tmp_annotation.annotation.genes:
genes.extend([GeneIntervalModel.Schema().dump(x) for x in tmp_annotation.annotation.genes])
tot_features += len(feature_collections) if feature_collections else 0
tot_genes += len(genes) if genes else 0
annotation = AnnotationCollectionModel.Schema().load(
dict(
genes=genes,
feature_collections=feature_collections,
name=seqrecord.id,
sequence_name=seqrecord.id,
start=0,
end=len(seqrecord),
qualifiers=source_qualifiers,
)
)
yield ParsedAnnotationRecord(annotation=annotation, seqrecord=seqrecord)
if tot_genes + tot_features == 0:
raise EmptyGenBankError("GenBank parsing produced zero genes and zero features.")
def _extract_generic_features(
seqrecord: SeqRecord,
filtered_features: List[SeqFeature],
feature_parse_func: Callable[[FeatureIntervalGenBankCollection], Dict[str, Any]],
) -> Optional[List[Dict[str, Any]]]:
"""
Extract all generic features from a SeqRecord. These are anything that did not qualify as a gene, based
on the feature type being one of the known members of :class:`biocantor.io.genbank.constants.GenBankFeatures`.
Feature collections are inferred through the ``locus_tag`` field. Any items without such a tag are treated
separately.
Args:
seqrecord: A SeqRecord object.
filtered_features: List of SeqFeature objects associated with the SeqRecord that are not gene-like.
feature_parse_func: Optional feature interval parse function implementation.
Returns:
A list of dictionary representations of feature interval collections, or ``None`` if no feature intervals were
found.
"""
# sort by locus tag, or null if no locus tag is provided.
sorted_filtered_features = sorted(
filtered_features, key=lambda f: f.qualifiers.get(KnownQualifiers.LOCUS_TAG.value, [""])[0]
)
feature_collections = []
for locus_tag, features in itertools.groupby(
sorted_filtered_features, key=lambda f: f.qualifiers.get(KnownQualifiers.LOCUS_TAG.value, [""])[0]
):
if not locus_tag:
# we are in the null scenario, meaning that there are no locus tag information and thus no groupings.
for feature in features:
feature_collection = _construct_feature_collection_from_features([feature], seqrecord)
if feature_collection:
feature_collections.append(feature_collection)
else:
feature_collection = _construct_feature_collection_from_features(list(features), seqrecord)
if feature_collection:
feature_collections.append(feature_collection)
return [feature_parse_func(fc) for fc in feature_collections] if feature_collections else None
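# Illustrative usage sketch (not part of BioCantor): a minimal driver for
# parse_genbank() using the default locus_tag grouping. The file name is a
# placeholder; any local GenBank file would do.
def _example_parse_genbank(path="example.gbk"):
    total_genes = 0
    for parsed in parse_genbank(path, gbk_type=GenBankParserType.LOCUS_TAG):
        genes = parsed.annotation.genes or []
        total_genes += len(genes)
    return total_genes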
|
py | b40fed09b7bdfde880b5313da686dc6fa03b2d68 | from caffe_pb2 import *
|
py | b40fed7f827244614c06cf03fb6acf4bfc72f299 | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.offer import models, results
from oscar.test.factories import VoucherFactory, ConditionalOfferFactory
class TestOfferApplicationsObject(TestCase):
def setUp(self):
self.applications = results.OfferApplications()
self.offer = models.ConditionalOffer()
def test_is_countable(self):
self.assertEqual(0, len(self.applications))
def test_can_filter_shipping_discounts(self):
result = models.ShippingDiscount()
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.shipping_discounts))
def test_can_filter_offer_discounts(self):
result = models.BasketDiscount(D('2.00'))
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.offer_discounts))
def test_can_filter_post_order_actions(self):
result = models.PostOrderAction("Something will happen")
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.post_order_actions))
def test_grouped_voucher_discounts(self):
voucher = VoucherFactory()
offer1 = ConditionalOfferFactory(name='offer1')
offer1.set_voucher(voucher)
result1 = models.BasketDiscount(D('2.00'))
offer2 = ConditionalOfferFactory(name='offer2')
offer2.set_voucher(voucher)
result2 = models.BasketDiscount(D('1.00'))
self.applications.add(offer1, result1)
self.applications.add(offer2, result2)
assert len(self.applications) == 2
discounts = self.applications.grouped_voucher_discounts
discounts = [x for x in discounts]
assert len(discounts) == 1
assert discounts[0]['voucher'] == voucher
assert discounts[0]['discount'] == D('3.00')
|