from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import collections.abc
import numbers
import re
import six
from ..base import ESPObject
from ..utils import xml
from ..utils.data import gen_name
RegexType = type(re.compile(r''))
def listify(obj):
if obj is None:
return
if isinstance(obj, (tuple, set, list)):
return list(obj)
return [obj]
def map_properties(cls, props, required=None, delete=None):
'''
Remap property names
Parameters
----------
cls : Connector-subclass
The Connector class which contains the property definitions
props : dict
The dictionary of properties
required : string or list-of-strings, optional
The list of required keys. These will be returned in a tuple
in addition to the remaining dictionary of properties.
delete : string or list-of-strings, optional
List of keys to remove
Returns
-------
((required-params), {properties})
If ``required`` contains names
{properties}
        If ``required`` is empty
'''
    delete = (listify(delete) or []) + ['self']
required = listify(required)
names = {}
names.update({v.name: k for k, v in six.iteritems(cls.property_defs)})
names.update({k: k for k, v in six.iteritems(cls.property_defs)})
out = {}
for key, value in six.iteritems(props):
if delete and key in delete:
continue
try:
out[names[key]] = value
except KeyError:
raise KeyError('%s is not a valid parameter for %s' % (key, cls))
if required:
req_out = []
for item in required:
req_out.append(out.pop(item, None))
return tuple(req_out), out
return out
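# Usage sketch for map_properties (``MyConnector`` is a hypothetical
# Connector subclass whose property_defs include 'host', 'port', and
# 'dateformat'):
#
#   (host, port), rest = map_properties(MyConnector,
#                                       dict(host='a', port=1, dateformat='%Y'),
#                                       required=['host', 'port'])
#   # host == 'a', port == 1, rest == {'dateformat': '%Y'}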
def get_subclasses(cls):
for subclass in cls.__subclasses__():
for subcls in get_subclasses(subclass):
yield subcls
yield subclass
def get_connector_class(elem_or_class, type=None, properties=None):
'''
Get a connector class that matches the current element
Parameters
----------
elem_or_class : string or Element
The name of the connector class, or an XML definition
type : string, optional
The type of the connector. Ignored if ``elem_or_class`` is an Element.
properties : dict, optional
        The properties for the connector. Ignored if ``elem_or_class`` is
an Element.
Returns
-------
:class:`Connector` or subclass of :class:`Connector`
'''
if isinstance(elem_or_class, six.string_types):
if elem_or_class.startswith('<'):
elem_or_class = xml.ensure_element(elem_or_class)
if isinstance(elem_or_class, six.string_types):
cls = elem_or_class
if type is None:
type = 'subscribe'
if properties is None:
properties = {}
else:
elem = elem_or_class
cls = elem.attrib['class']
if 'type' in elem.attrib:
type = elem.attrib['type']
else:
type = elem.find('./properties/property[@name="type"]').text
properties = {}
for item in elem.findall('./properties/property'):
properties[item.attrib['name']] = item.text
if type.startswith('p'):
type = 'publish'
else:
type = 'subscribe'
out = []
for item in get_subclasses(Connector):
if item.connector_key['cls'] == cls and item.connector_key['type'] == type:
out.append(item)
if not out:
return Connector
if len(out) == 1:
return out[0]
# Check extra matching properties
matches = True
for item in reversed(sorted(out, key=lambda x: len(x.connector_key))):
matches = True
for key, value in six.iteritems(item.connector_key):
if key == 'cls' or key == 'type':
continue
elif isinstance(value, RegexType):
eprop = properties.get(key)
if eprop is None or not value.match(eprop):
matches = False
break
else:
eprop = properties.get(key)
if eprop is None or value != eprop:
matches = False
break
if matches:
break
if matches:
return item
return Connector
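# Usage sketch for get_connector_class, assuming the connector modules
# (e.g., rabbitmq.py) have already been imported so their subclasses are
# visible to get_subclasses:
#
#   get_connector_class('rmq', type='subscribe')   # -> RabbitMQSubscriber
#   get_connector_class('<connector class="rmq" type="publish"/>')
#                                                  # -> RabbitMQPublisher
#   get_connector_class('no-such-class')           # -> Connector (fallback)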
def prop(name, dtype, required=False, valid_values=None, valid_expr=None, default=None):
    return ConnectorProperty(name, dtype, required=required,
                             valid_values=valid_values,
                             valid_expr=valid_expr,
                             default=default)
class ConnectorProperty(object):
def __init__(self, name, dtype, required=False, valid_values=None,
valid_expr=None, default=None):
self.name = name
self.dtype = dtype
self.required = required
if valid_values is None:
self._valid_values = None
elif isinstance(valid_values, (six.string_types, RegexType)):
self._valid_values = [valid_values]
else:
self._valid_values = list(valid_values)
if valid_expr is None:
self._valid_expr = None
elif isinstance(valid_expr, (six.string_types, RegexType)):
self._valid_expr = [valid_expr]
else:
self._valid_expr = list(valid_expr)
if default is None:
self.default = default
else:
self.default = self.validate_value(default)
def validate_type(self, value):
'''
Verify that the given value is the correct type
Parameters
----------
value : any
The property value
Raises
------
TypeError
If the value is not the declared type
'''
unmatched_types = []
dtypes = self.dtype
if not isinstance(self.dtype, (list, tuple)):
dtypes = [self.dtype]
for dtype in dtypes:
if dtype in ['int', int]:
if not isinstance(value, numbers.Integral):
unmatched_types.append(dtype)
elif dtype in ['float', 'double', float]:
if not isinstance(value, (numbers.Integral, numbers.Real)):
unmatched_types.append(dtype)
elif dtype in ['boolean', 'bool', bool]:
if value is not True and value is not False:
unmatched_types.append(dtype)
elif dtype in ['string', str]:
if not isinstance(value, six.string_types):
unmatched_types.append(dtype)
else:
raise TypeError('Unknown data type: %s' % dtype)
        if len(unmatched_types) == len(dtypes):
            raise TypeError('%s is not one of: %s' %
                            (value, ', '.join(str(t) for t in unmatched_types)))
def validate_value(self, value):
'''
Verify that the value is valid
Parameters
----------
value : any
The property value to test
Returns
-------
bool
'''
if value is None:
            return not self.required
        # If it's an environment variable reference (e.g., @NAME@),
        # always consider it valid
        if isinstance(value, six.string_types) and re.search(r'@\w+@', value):
            return True
# Make sure value is the correct type
self.validate_type(value)
# Check specific values and regular expressions
valid = True
if self._valid_values:
regexes = [x for x in self._valid_values if isinstance(x, RegexType)]
values = [x for x in self._valid_values if not isinstance(x, RegexType)]
if valid and values and value not in values:
valid = False
if valid and regexes:
for item in regexes:
if not item.search(value):
valid = False
break
# Check expressions and regular expressions
        if valid and self._valid_expr:
            for item in self._valid_expr:
                if isinstance(item, RegexType):
                    if not item.search(value):
                        valid = False
                        break
                elif isinstance(item, six.string_types):
                    if not eval(item):
                        valid = False
                        break
return valid
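# Usage sketch for property validation (values are illustrative):
#
#   p = prop('rmqport', dtype='int', required=True)
#   p.validate_value(5672)          # True: correct type, no constraints
#   p.validate_value('@RMQ_PORT@')  # True: @NAME@ substitutions always pass
#   p.validate_value(None)          # False: the property is required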
class Connector(collections.abc.MutableMapping):
'''
Window connector
Parameters
----------
conncls : string
        The connector class name
type : string, optional
The type of the connector
name : string, optional
The name of the connector
properties : dict, optional
Dictionary of connector properties
'''
connector_key = dict(cls='', type='')
property_defs = dict()
def __init__(self, conncls, type=None, name=None, is_active=None, properties=None):
self.cls = conncls
self.name = name or gen_name(prefix='c_')
self.type = type
self.is_active = is_active
self.properties = {}
        self.set_properties(**(properties or {}))
def copy(self, deep=False):
'''
Return a copy of the object
Parameters
----------
deep : bool, optional
Should sub-objects be copied as well?
Returns
-------
:class:`Connector`
'''
return type(self).from_parameters(conncls=self.cls, type=self.type,
name=self.name, is_active=self.is_active,
properties=self.properties)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo):
return self.copy(deep=True)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
        return cls(conncls, type=type, name=name, is_active=is_active,
                   properties=properties)
@classmethod
def from_element(cls, data, session=None):
'''
Construct connector from XML definition
Parameters
----------
data : xml-string or ElementTree.Element
The XML definition
session : requests.Session, optional
The session object associated with the server
Returns
-------
:class:`Connector`
'''
data = xml.ensure_element(data)
conncls = data.attrib['class']
name = data.attrib.get('name')
type = data.attrib.get('type')
is_active = data.attrib.get('active')
properties = {}
for item in data.findall('./properties/property'):
properties[item.attrib['name']] = item.text
return get_connector_class(data).from_parameters(conncls, type=type,
name=name,
is_active=is_active,
properties=properties)
from_xml = from_element
def to_element(self):
'''
Export connector definition to ElementTree.Element
Returns
-------
:class:`ElementTree.Element`
'''
out = xml.new_elem('connector', attrib={'class': self.cls,
'name': self.name,
'active': self.is_active,
'type': self.type})
        sorted_items = sorted([(k, v) for k, v in six.iteritems(self.properties)])
properties = collections.OrderedDict(sorted_items)
# Add defaults
for key, value in six.iteritems(type(self).property_defs):
if value.default is not None and properties.get(key, None) is None:
xml.add_properties(out, {key: value.default})
# Always make sure configfilesection= is next
if properties.get('configfilesection', None) is not None:
xml.add_properties(out, dict(configfilesection=properties['configfilesection']))
properties.pop('configfilesection')
# Add the rest
xml.add_properties(out, properties)
return out
def to_xml(self, pretty=False):
'''
Export connector definition to XML
Parameters
----------
pretty : bool, optional
Should the output include whitespace for readability?
Returns
-------
string
'''
return xml.to_xml(self.to_element(), pretty=pretty)
def set_properties(self, **kwargs):
'''
Set connector properties
Parameters
----------
**kwargs : keyword-parameters, optional
Key / value pairs of properties
'''
for key, value in six.iteritems(kwargs):
if key == 'type':
if value.startswith('p'):
self.type = 'publish'
else:
self.type = 'subscribe'
continue
if key == 'name':
self.name = value
continue
self.properties[key] = value
self.properties = {k: v for k, v in six.iteritems(self.properties)
if v is not None}
def __getitem__(self, key):
return self.properties[key]
def __setitem__(self, key, value):
self.properties[key] = value
def __delitem__(self, key):
del self.properties[key]
def __iter__(self):
return iter(self.properties)
def __len__(self):
return len(self.properties)
def __str__(self):
if self.type and self.name:
return '%s(%s, name=%s, type=%s, properties=%s)' % \
(type(self).__name__, repr(self.cls), repr(self.name),
repr(self.type), repr(self.properties))
if self.type:
return '%s(%s, type=%s, properties=%s)' % \
(type(self).__name__, repr(self.cls),
repr(self.type), repr(self.properties))
return '%s(%s, name=%s, properties=%s)' % \
(type(self).__name__, repr(self.cls),
repr(self.name), repr(self.properties))
def __repr__(self):
return str(self)
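if __name__ == '__main__':
    # Minimal sketch of the generic Connector: it behaves as a mutable
    # mapping of properties and serializes to a <connector> XML element.
    # The class name and property values below are illustrative only.
    conn = Connector('fs', type='subscribe',
                     properties=dict(fsname='output.csv', fstype='csv'))
    conn['snapshot'] = True  # MutableMapping interface
    print(conn.to_xml(pretty=True))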
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/base.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class RabbitMQSubscriber(Connector):
'''
Subscribe to Rabbit MQ events
Parameters
----------
rmqhost : string
Specifies the Rabbit MQ server host name
    rmqport : string
Specifies the Rabbit MQ server port
rmquserid : string
Specifies the user name required to authenticate the connector's
session with the Rabbit MQ server.
rmqpassword : string
Specifies the password associated with rmquserid
rmqexchange : string
        Specifies the Rabbit MQ exchange. The exchange is created by the
        connector if it does not already exist.
rmqtopic : string
Specifies the Rabbit MQ routing key to which messages are published.
rmqtype : string
Specifies binary, CSV, JSON, or the name of a string field in
the subscribed window schema.
urlhostport : string
Specifies the host:port field in the metadata topic subscribed to
on start-up to field metadata requests.
numbufferedmsgs : int
Specifies the maximum number of messages buffered by a standby
subscriber connector.
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
hotfailover : boolean, optional
Enables hot failover mode
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted
as an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
buspersistence : boolean, optional
Specify to send messages using persistent delivery mode
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition used to convert event blocks to protobuf messages.
When you specify this parameter, you must also specify the
protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in
the .proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
csvincludeschema : string, optional
When rmqtype=CSV, specifies when to prepend output CSV data with
the window's serialized schema. Valid values are never, once, and
pereventblock. The default value is never.
useclientmsgid : boolean, optional
When performing a failover operation and extracting a message ID
from an event block, use the client-generated message ID instead
of the engine-generated message ID.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
rmqpasswordencrypted : boolean, optional
Specifies that rmqpassword is encrypted
rmqvhost : string, optional
Specifies the Rabbit MQ vhost. The default is "/".
csvmsgperevent : int, optional
For CSV, specifies to send one message per event. The default is
one message per transactional event block or else one message per event.
csvmsgpereventblock : int, optional
For CSV, specifies to send one message per event block. The default
is one message per transactional event block or else one message
per event.
rmqcontenttype : string, optional
Specifies the value of the content_type parameter in messages
sent to RabbitMQ.
    rmqheaders : string, optional
        Specifies a comma-separated list of optional key-value headers
        in messages sent to RabbitMQ. The default is no headers.
rmqssl : boolean, optional
Specifies to enable SSL encryption on the connection to the
Rabbit MQ server.
rmqsslcacert : string, optional
When rmqssl is enabled, specifies the full path of the SSL CA
certificate .pem file.
rmqsslkey : string, optional
When rmqssl is enabled, specifies the full path of the SSL key
.pem file.
    rmqsslcert : string, optional
When rmqssl is enabled, specifies the full path of the SSL
certificate .pem file.
Returns
-------
:class:`RabbitMQSubscriber`
'''
connector_key = dict(cls='rmq', type='subscribe')
property_defs = dict(
rmquserid=prop('rmquserid', dtype='string', required=True),
rmqpassword=prop('rmqpassword', dtype='string', required=True),
rmqhost=prop('rmqhost', dtype='string', required=True),
rmqport=prop('rmqport', dtype='int', required=True),
rmqexchange=prop('rmqexchange', dtype='string', required=True),
rmqtopic=prop('rmqtopic', dtype='string', required=True),
rmqtype=prop('rmqtype', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
collapse=prop('collapse', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
hotfailover=prop('hotfailover', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
buspersistence=prop('buspersistence', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
csvincludeschema=prop('csvincludeschema', dtype='string'),
        useclientmsgid=prop('useclientmsgid', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
rmqpasswordencrypted=prop('rmqpasswordencrypted', dtype='boolean'),
rmqvhost=prop('rmqvhost', dtype='string'),
csvmsgperevent=prop('csvmsgperevent', dtype='int'),
csvmsgpereventblock=prop('csvmsgpereventblock', dtype='int'),
rmqcontenttype=prop('rmqcontenttype', dtype='string'),
rmqheaders=prop('rmqheaders', dtype='string'),
rmqssl=prop('rmqssl', dtype='boolean'),
rmqsslcacert=prop('rmqsslcacert', dtype='string'),
rmqsslkey=prop('rmqsslkey', dtype='string'),
rmqsslcert=prop('rmqsslcert', dtype='string')
)
def __init__(self, rmqhost=None, rmqport=None, rmquserid=None,
rmqpassword=None, rmqexchange=None,
rmqtopic=None, rmqtype=None, urlhostport=None, numbufferedmsgs=None,
snapshot=None, name=None, is_active=None, collapse=None,
rmretdel=None, hotfailover=None, dateformat=None,
buspersistence=None, protofile=None, protomsg=None,
csvincludeschema=None, useclientmsgid=None,
configfilesection=None, rmqpasswordencrypted=None,
rmqvhost=None, csvmsgperevent=None, csvmsgpereventblock=None,
rmqcontenttype=None, rmqheaders=None, rmqssl=None,
rmqsslcacert=None, rmqsslkey=None, rmqsslcert=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'rmq', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['rmqhost', 'rmqport', 'rmquserid',
'rmqpassword', 'rmqexchange',
'rmqtopic', 'rmqtype',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
req[7], name=name, is_active=is_active, **properties)
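# Construction sketch for RabbitMQSubscriber (all connection values are
# illustrative; no broker is contacted when building the definition):
#
#   sub = RabbitMQSubscriber(rmqhost='mq.example.com', rmqport=5672,
#                            rmquserid='esp', rmqpassword='secret',
#                            rmqexchange='engine', rmqtopic='events',
#                            rmqtype='json', urlhostport='esp-host:55555')
#   sub.to_xml(pretty=True)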
class RabbitMQPublisher(Connector):
'''
Publish Rabbit MQ events
Parameters
----------
rmqhost : string
Specifies the Rabbit MQ server host name
    rmqport : string
Specifies the Rabbit MQ server port
rmquserid : string
Specifies the user name required to authenticate the connector’s
session with the Rabbit MQ server.
rmqpassword : string
Specifies the password associated with rmquserid
rmqexchange : string
        Specifies the Rabbit MQ exchange. The exchange is created by the
        connector if it does not already exist.
rmqtopic : string
Specifies the Rabbit MQ routing key to which messages are published.
rmqtype : string
Specifies binary, CSV, JSON, or the name of a string field in
the subscribed window schema.
urlhostport : string
Specifies the host:port field in the metadata topic subscribed to
on start-up to field metadata requests.
transactional : string, optional
When rmqtype=CSV, sets the event block type to transactional.
The default value is normal.
blocksize : int, optional
When rmqtype=CSV, specifies the number of events to include in a
published event block. The default value is 1.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted
as an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
buspersistence : string, optional
Controls both auto-delete and durable
buspersistencequeue : string, optional
Specifies the queue name used by a persistent publisher
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be
parsed, the event is dropped, an error is logged, and
publishing continues.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition used to convert event blocks to
protobuf messages. When you specify this parameter, you must
also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV
events. The default delimiter is the , character.
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
autogenerated by the source window.
ackwindow : int, optional
Specifies the time period (in seconds) to leave messages that
are received from Rabbit MQ unacknowledged.
acktimer : int, optional
Specifies the time interval (in seconds) for how often to
check whether to send acknowledgments that are triggered by
the ackwindow parameter. Must be configured if ackwindow is
configured.
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert
rmqpasswordencrypted : boolean, optional
Specifies that rmqpassword is encrypted
addcsvopcode : boolean, optional
Prepends an opcode and comma to input CSV events. The opcode
is Insert unless publishwithupsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV events
(with a comma). Valid values are "normal" and "partialupdate".
rmqvhost : string, optional
Specifies the Rabbit MQ vhost. The default is "/"
    useclientmsgid : boolean, optional
If the Source window has been restored from a persist to disk,
ignores received binary event blocks that contain a message ID
less than the greatest message ID in the restored window.
rmqssl : boolean, optional
Specifies to enable SSL encryption on the connection to the
Rabbit MQ server.
rmqsslcacert : string, optional
When rmqssl is enabled, specifies the full path of the SSL CA
certificate .pem file.
rmqsslkey : string, optional
When rmqssl is enabled, specifies the full path of the SSL key
.pem file.
    rmqsslcert : string, optional
When rmqssl is enabled, specifies the full path of the SSL
certificate .pem file.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`RabbitMQPublisher`
'''
connector_key = dict(cls='rmq', type='publish')
property_defs = dict(
rmquserid=prop('rmquserid', dtype='string', required=True),
rmqpassword=prop('rmqpassword', dtype='string', required=True),
rmqhost=prop('rmqhost', dtype='string', required=True),
rmqport=prop('rmqport', dtype='int', required=True),
rmqexchange=prop('rmqexchange', dtype='string', required=True),
rmqtopic=prop('rmqtopic', dtype='string', required=True),
rmqtype=prop('rmqtype', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
dateformat=prop('dateformat', dtype='string'),
buspersistence=prop('buspersistence', dtype='string'),
buspersistencequeue=prop('buspersistencequeue', dtype='string'),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
ackwindow=prop('ackwindow', dtype='int'),
acktimer=prop('acktimer', dtype='int'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
rmqpasswordencrypted=prop('rmqpasswordencrypted', dtype='boolean'),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
addcsvflags=prop('addcsvflags', dtype='string'),
rmqvhost=prop('rmqvhost', dtype='string'),
useclientmsgid=prop('useclientmsgid', dtype='boolean'),
rmqssl=prop('rmqssl', dtype='boolean'),
rmqsslcacert=prop('rmqsslcacert', dtype='string'),
rmqsslkey=prop('rmqsslkey', dtype='string'),
rmqsslcert=prop('rmqsslcert', dtype='string'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, rmqhost=None, rmqport=None, rmquserid=None,
rmqpassword=None, rmqexchange=None,
rmqtopic=None, rmqtype=None, urlhostport=None,
name=None, is_active=None,
transactional=None, blocksize=None, dateformat=None,
buspersistence=None, buspersistencequeue=None,
ignorecsvparseerrors=None, protofile=None, protomsg=None,
configfilesection=None, csvfielddelimiter=None,
noautogenfield=None, ackwindow=None, acktimer=None,
publishwithupsert=None, rmqpasswordencrypted=None,
addcsvopcode=None, addcsvflags=None, rmqvhost=None,
useclientmsgid=None, rmqssl=None, rmqsslcacert=None,
rmqsslkey=None, rmqsslcert=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'rmq', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['rmqhost', 'rmqport',
'rmquserid', 'rmqpassword',
'rmqexchange', 'rmqtopic',
'rmqtype', 'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
req[7], name=name, is_active=is_active, **properties)
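if __name__ == '__main__':
    # Minimal sketch (illustrative values): building a publisher definition
    # only produces XML; no RabbitMQ connection is made here.
    pub = RabbitMQPublisher(rmqhost='mq.example.com', rmqport=5672,
                            rmquserid='esp', rmqpassword='secret',
                            rmqexchange='engine', rmqtopic='events',
                            rmqtype='csv', urlhostport='esp-host:55555',
                            blocksize=16)
    print(pub.to_xml(pretty=True))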
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/rabbitmq.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class OPCUASubscriber(Connector):
'''
Subscribe to OPC-UA operations
Parameters
----------
opcuaendpoint : string, optional
Specifies the OPC-UA server endpoint (only the portion
following opc.tcp://).
opcuanamespaceuri : string, optional
Specifies the OPC-UA server namespace URI. The default is the
namespace at index=0.
opcuausername : string, optional
Specifies the OPC-UA user name. The default is none.
opcuapassword : string, optional
Specifies the OPC-UA password. The default is none.
opcuanodeids : string, optional
Specifies a comma-separated list of Node IDs to map to ESP window
schema fields, in the form <identifier type>_<identifier>. The list
size must be equal to the number of fields in the subscribed window
schema. Window field names are in Node ID form by default.
configfilesection : string, optional
Specifies the name of the section in the connector config file to
parse for configuration parameters. Specify the value
as [configfilesection].
snapshot : boolean, optional
Specifies whether to send snapshot data
Returns
-------
:class:`OPCUASubscriber`
'''
connector_key = dict(cls='opcua', type='subscribe')
property_defs = dict(
opcuaendpoint=prop('opcuaendpoint', dtype='string'),
opcuanamespaceuri=prop('opcuanamespaceuri', dtype='string'),
opcuausername=prop('opcuausername', dtype='string'),
opcuapassword=prop('opcuapassword', dtype='string'),
opcuanodeids=prop('opcuanodeids', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
)
def __init__(self, opcuaendpoint=None, name=None, is_active=None,
opcuanamespaceuri=None, opcuausername=None,
opcuapassword=None, opcuanodeids=None,
configfilesection=None, snapshot=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'opcua', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
properties = map_properties(cls, properties, delete='type')
return cls(name=name, is_active=is_active, **properties)
class OPCUAPublisher(Connector):
'''
Publish OPC-UA operations
Parameters
----------
opcuaendpoint : string, optional
Specifies the OPC-UA server endpoint (only the portion
following opc.tcp://).
opcuanamespaceuri : string, optional
Specifies the OPC-UA server namespace URI. The default is the
namespace at index=0.
opcuausername : string, optional
Specifies the OPC-UA user name. The default is none.
opcuapassword : string, optional
Specifies the OPC-UA password. The default is none.
opcuanodeids : string, optional
Specifies a comma-separated list of Node IDs to map to ESP window
schema fields, in the form <identifier type>_<identifier>. The list
size must be equal to the number of fields in the subscribed window
schema. Window field names are in Node ID form by default.
publishinterval : int, optional
Specifies an interval in seconds when current values of all nodes
in the Source window schema are published. The default is to publish
when one or more values changes.
transactional : string, optional
Sets the event block type to transactional. The default value is normal.
blocksize : int, optional
Specifies the number of events to include in a published event
block. The default value is 1.
configfilesection : string, optional
Specifies the name of the section in the connector config file to
parse for configuration parameters. Specify the value
as [configfilesection].
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`OPCUAPublisher`
'''
connector_key = dict(cls='opcua', type='publish')
property_defs = dict(
opcuaendpoint=prop('opcuaendpoint', dtype='string'),
opcuanamespaceuri=prop('opcuanamespaceuri', dtype='string'),
opcuausername=prop('opcuausername', dtype='string'),
opcuapassword=prop('opcuapassword', dtype='string'),
opcuanodeids=prop('opcuanodeids', dtype='string'),
publishinterval=prop('publishinterval', dtype='int'),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
configfilesection=prop('configfilesection', dtype='string'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, opcuaendpoint=None, name=None, is_active=None,
opcuanamespaceuri=None, opcuausername=None,
opcuapassword=None, opcuanodeids=None,
publishinterval=None, transactional=None,
blocksize=None, configfilesection=None,
publishwithupsert=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'opcua', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
properties = map_properties(cls, properties, delete='type')
return cls(name=name, is_active=is_active, **properties)
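if __name__ == '__main__':
    # Minimal sketch (endpoint and node IDs are illustrative). Node IDs use
    # the <identifier type>_<identifier> form described above.
    pub = OPCUAPublisher(opcuaendpoint='opcua-server:4840',
                         opcuanodeids='i_1001,i_1002',
                         publishinterval=5)
    print(pub.to_xml(pretty=True))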
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/opcua.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class UVCPublisher(Connector):
'''
    Publish photos taken by a V4L2-compatible camera
Parameters
----------
frame_rate : float, optional
Specifies the frames per second that the camera streams.
Must be a double. The default value is 15.
format_in : string, optional
Specifies the image format of captured photos.
The default is jpeg. yuyv, an uncompressed image format,
is also supported.
format_out : string, optional
Specifies the image format that the connector publishes.
The default is jpeg. yuyv, an uncompressed image format,
is supported only when format_in is yuyv.
    width : int, optional
        Specifies the width of the photo.
    height : int, optional
        Specifies the height of the photo.
brightness : string, optional
Specifies the brightness of the photo.
gain : string, optional
Specifies the gain of the photo.
saturation : string, optional
Specifies the saturation of the photo.
contrast : string, optional
Specifies the contrast of the photo.
device : string, optional
Specifies the device name the camera is using on the
Linux operating system.
blocking : boolean, optional
Specifies whether the connector is in blocking mode.
predelay : int, optional
Specifies a delay time, in seconds, on starting
the connector.
maxevents : int, optional
Specifies the maximum number of events to publish.
cameraid : string, optional
        Specifies an arbitrary string that is copied into the corresponding
        string field in the Source window. This value can be used by the
        model to identify the source camera.
Returns
-------
:class:`UVCPublisher`
'''
connector_key = dict(cls='uvc', type='publish')
property_defs = dict(
frame_rate=prop('frame_rate', dtype='float'),
format_in=prop('format_in', dtype='string'),
format_out=prop('format_out', dtype='string'),
width=prop('width', dtype='int'),
height=prop('height', dtype='int'),
brightness=prop('brightness', dtype='string'),
gain=prop('gain', dtype='string'),
saturation=prop('saturation', dtype='string'),
contrast=prop('contrast', dtype='string'),
device=prop('device', dtype='string'),
blocking=prop('blocking', dtype='boolean'),
predelay=prop('predelay', dtype='int'),
maxevents=prop('maxevents', dtype='int'),
cameraid=prop('cameraid', dtype='string')
)
def __init__(self, name=None, is_active=None,
frame_rate=None, format_in=None, format_out=None,
width=None, height=None, brightness=None,
gain=None, saturation=None, contrast=None,
device=None, blocking=None, predelay=None,
maxevents=None, cameraid=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'uvc', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
properties = map_properties(cls, properties, delete='type')
return cls(name=name, is_active=is_active, **properties)
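if __name__ == '__main__':
    # Minimal sketch (device path and capture settings are illustrative):
    cam = UVCPublisher(device='/dev/video0', frame_rate=15.0,
                       width=640, height=480, cameraid='cam0')
    print(cam.to_xml(pretty=True))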
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/uvc.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class WebSphereMQSubscriber(Connector):
'''
Subscribe to IBM WebSphere MQ events
Parameters
----------
mqtype : string
Specifies binary, CSV, JSON, XML, or the name of a string
field in the subscribed window schema.
snapshot : boolean, optional
Specifies whether to send snapshot data.
mqtopic : string, optional
Specifies the MQ topic name. Required if mqqueue is
not configured.
mqqueue : string, optional
Specifies the MQ queue name. Required if mqtopic is
not configured.
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
queuemanager : string, optional
Specifies the MQ queue manager.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP
fields in CSV events. The default behavior is these fields
are interpreted as an integer number of seconds
(ESP_DATETIME) or microseconds (ESP_TIMESTAMP) since epoch.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks
received by a subscriber that were introduced by a
window retention policy.
configfilesection : string, optional
Specifies the name of the section in the connector config
        file to parse for configuration parameters. Specify
        the value as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google
Protocol Buffers message definition. This definition is
used to convert event blocks to protobuf messages. When
you specify this parameter, you must also specify the
protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message
in the .proto file that you specified with the protofile
parameter. Event blocks are converted into this message.
usecorrelid : boolean, optional
Copies the value of the correlid field in the event to
the MQ message correlation ID.
csvmsgperevent : int, optional
For CSV, specifies to send one message per event. The
default is one message per transactional event block or
else one message per event.
csvmsgpereventblock : int, optional
For CSV, specifies to send one message per event block.
The default is one message per transactional event block
or else one message per event.
Returns
-------
:class:`WebSphereMQSubscriber`
'''
connector_key = dict(cls='mq', type='subscribe')
property_defs = dict(
mqtype=prop('mqtype', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
mqtopic=prop('mqtopic', dtype='string'),
mqqueue=prop('mqqueue', dtype='string'),
collapse=prop('collapse', dtype='string'),
queuemanager=prop('queuemanager', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
usecorrelid=prop('usecorrelid', dtype='boolean'),
csvmsgperevent=prop('csvmsgperevent', dtype='int'),
csvmsgpereventblock=prop('csvmsgpereventblock', dtype='int')
)
def __init__(self, mqtype=None, name=None, is_active=None, snapshot=None,
mqtopic=None, mqqueue=None, collapse=None,
queuemanager=None, dateformat=None, rmretdel=None,
configfilesection=None, protofile=None,
protomsg=None, usecorrelid=None, csvmsgperevent=None,
csvmsgpereventblock=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'mq', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['mqtype'],
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
class WebSphereMQPublisher(Connector):
'''
Publish IBM WebSphere MQ events
Parameters
----------
mqtype : string
Specifies binary, CSV, JSON, XML, or opaquestring. For
opaquestring, the Source window schema is assumed to
be "index:int64,message:string".
mqtopic : string, optional
Specifies the MQ topic name. Required if mqqueue is
not configured.
mqqueue : string, optional
Specifies the MQ queue name. Required if mqtopic is
not configured.
mqsubname : string, optional
Specifies the MQ subscription name. Required if
mqtopic is configured.
blocksize : int, optional
Specifies the number of events to include in a
published event block. The default value is 1.
transactional : string, optional
Sets the event block type to transactional. The
default value is normal.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP
fields in CSV events. The default behavior is these
fields are interpreted as an integer number of
seconds (ESP_DATETIME) or microseconds (ESP_TIMESTAMP)
since epoch.
queuemanager : string, optional
Specifies the MQ queue manager.
configfilesection : string, optional
Specifies the name of the section in the connector config
        file to parse for configuration parameters. Specify
        the value as [configfilesection].
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot
be parsed, the event is dropped, an error is logged,
and publishing continues.
protofile : string, optional
Specifies the .proto file that contains the Google
Protocol Buffers message definition. This definition
is used to convert event blocks to protobuf messages.
When you specify this parameter, you must also specify
the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message
in the .proto file that you specified with the protofile
parameter. Event blocks are converted into this message.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in
input CSV events. The default delimiter is the , character.
noautogenfield : boolean, optional
Specifies that input events are missing the key field
that is autogenerated by the source window.
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert.
addcsvopcode : boolean, optional
Prepends an opcode and comma to input CSV events. The
opcode is Insert unless publishwithupsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV
events (with a comma). Valid values are "normal"
and "partialupdate".
usecorrelid : boolean, optional
Copies the value of the MQ message correlation ID into
the correlid field in every Event Stream Processing
event.
    ignoremqmdformat : boolean, optional
Specifies to ignore the value of the Message Descriptor
Format parameter, and assume the message format is
compatible with the mqtype parameter setting.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`WebSphereMQPublisher`
'''
connector_key = dict(cls='mq', type='publish')
property_defs = dict(
mqtype=prop('mqtype', dtype='string', required=True),
mqtopic=prop('mqtopic', dtype='string'),
mqqueue=prop('mqqueue', dtype='string'),
mqsubname=prop('mqsubname', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
transactional=prop('transactional', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
queuemanager=prop('queuemanager', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
addcsvflags=prop('addcsvflags', dtype='string'),
usecorrelid=prop('usecorrelid', dtype='boolean'),
ignoremqmdformat=prop('ignoremqmdformat', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, mqtype=None, name=None, is_active=None,
mqtopic=None, mqqueue=None, mqsubname=None,
blocksize=None, transactional=None, dateformat=None,
queuemanager=None, configfilesection=None,
ignorecsvparseerrors=None, protofile=None,
protomsg=None, csvfielddelimiter=None,
noautogenfield=None, publishwithupsert=None,
addcsvopcode=None, addcsvflags=None,
usecorrelid=None, ignoremqmdformat=None,
maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'mq', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['mqtype'],
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
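if __name__ == '__main__':
    # Minimal sketch (queue manager and queue names are illustrative).
    # mqqueue is used here, so mqtopic may be omitted.
    sub = WebSphereMQSubscriber(mqtype='csv', mqqueue='ESP.OUT',
                                queuemanager='QM1')
    print(sub.to_xml(pretty=True))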
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/websphere.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class TervelaSubscriber(Connector):
'''
Subscribe to Tervela Data Fabric events
Parameters
----------
tvaprimarytmx : string
Specifies the host name or IP address of the primary TMX
tvauserid : string
Specifies a user name defined in the Tervela TPM.
Publish-topic entitlement rights must be associated with
this user name.
tvapassword : string
Specifies the password associated with tvauserid
tvatopic : string
        Specifies the topic name for the topic to which to subscribe.
This topic must be configured on the TPM for the GD service and
tvauserid must be assigned the Guaranteed Delivery subscribe
rights for this Topic in the TPM.
tvaclientname : string
Specifies the client name associated with the Tervela
Guaranteed Delivery context.
tvamaxoutstand : int
Specifies the maximum number of unacknowledged messages that
can be published to the Tervela fabric (effectively the size of
the publication cache). Should be twice the expected transmit rate.
numbufferedmsgs : int
Specifies the maximum number of messages buffered by a standby
subscriber connector.
urlhostport : string
        Specifies the “host:port” string sent in the metadata message
published by the connector on topic SAS.META.tvaclientname when
it starts.
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
hotfailover : boolean, optional
Enables hot failover mode
tvasecondarytmx : string, optional
Specifies the host name or IP address of the secondary TMX.
Required if logging in to a fault-tolerant pair.
tvalogfile : string, optional
Causes the connector to log to the specified file instead of to
syslog (on Linux or Solaris) or Tervela.log (on Windows)
tvapubbwlimit : int, optional
Specifies the maximum bandwidth, in Mbps, of data published to
the fabric. The default value is 100 Mbps.
    tvapubrate : int, optional
        Specifies the rate at which data messages are published to the
        fabric, in thousands of messages per second. The default value
        is 30,000 messages per second.
tvapubmsgexp : int, optional
Specifies the maximum amount of time, in seconds, that published
messages are kept in the cache in the Tervela API.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
configfilesection : string, optional
Specifies the name of the section in the connector config file to
parse for configuration parameters. Specify the value
as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition. This definition is used to convert event blocks
to protobuf messages. When you specify this parameter, you must
also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter. Event
blocks are converted into this message.
json : boolean, optional
Enables transport of event blocks encoded as JSON messages
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted
as an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
tvapasswordencrypted : boolean, optional
Specifies that tvapassword is encrypted
Returns
-------
:class:`TervelaSubscriber`
'''
connector_key = dict(cls='tervela', type='subscribe')
property_defs = dict(
tvaprimarytmx=prop('tvaprimarytmx', dtype='string', required=True),
tvauserid=prop('tvauserid', dtype='string', required=True),
tvapassword=prop('tvapassword', dtype='string', required=True),
tvatopic=prop('tvatopic', dtype='string', required=True),
tvaclientname=prop('tvaclientname', dtype='string', required=True),
tvamaxoutstand=prop('tvamaxoutstand', dtype='int', required=True),
numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
        snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
collapse=prop('collapse', dtype='string'),
hotfailover=prop('hotfailover', dtype='boolean'),
tvasecondarytmx=prop('tvasecondarytmx', dtype='string'),
tvalogfile=prop('tvalogfile', dtype='string'),
tvapubbwlimit=prop('tvapubbwlimit', dtype='int'),
tvapubrate=prop('tvapubrate', dtype='int'),
tvapubmsgexp=prop('tvapubmsgexp', dtype='int'),
rmretdel=prop('rmretdel', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
json=prop('json', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
tvapasswordencrypted=prop('tvapasswordencrypted', dtype='boolean')
)
def __init__(self, tvaprimarytmx=None, tvauserid=None, tvapassword=None,
tvatopic=None, tvaclientname=None, tvamaxoutstand=None,
numbufferedmsgs=None, urlhostport=None,
name=None, is_active=None, snapshot=None,
collapse=None, hotfailover=None, tvasecondarytmx=None,
tvalogfile=None, tvapubbwlimit=None, tvapubrate=None,
tvapubmsgexp=None, rmretdel=None, configfilesection=None,
protofile=None, protomsg=None, json=None,
dateformat=None, tvapasswordencrypted=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tervela', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tvaprimarytmx',
'tvauserid',
'tvapassword',
'tvatopic',
'tvaclientname',
'tvamaxoutstand',
'numbufferedmsgs',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
req[7], name=name, is_active=is_active, **properties)
class TervelaPublisher(Connector):
'''
    Publish Tervela Data Fabric events
Parameters
----------
tvaprimarytmx : string
Specifies the host name or IP address of the primary TMX
tvauserid : string
Specifies a user name defined in the Tervela TPM. Subscribe-topic
entitlement rights must be associated with this user name.
tvapassword : string
Specifies the password associated with tvauserid
tvatopic : string
Specifies the topic name for the topic to which to publish. This
topic must be configured on the TPM for the GD service.
tvaclientname : string
Specifies the client name associated with the Tervela Guaranteed
Delivery context. Must be unique among all instances of
Tervela connectors.
tvasubname : string
Specifies the name assigned to the Guaranteed Delivery subscription
being created. The combination of this name and tvaclientname
are used by the fabric to replay the last subscription state
urlhostport : string
Specifies the “host:port” string sent in the metadata message
published by the connector on topic SAS.META.tvaclientname when
it starts.
tvasecondarytmx : string, optional
Specifies the host name or IP address of the secondary TMX.
Required when logging in to a fault-tolerant pair.
tvalogfile : string, optional
Causes the connector to log to the specified file instead of to
syslog (on Linux or Solaris) or Tervela.log (on Windows)
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition. This definition is used to convert
event blocks to protobuf messages. When you specify this
parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
json : boolean, optional
Enables transport of event blocks encoded as JSON messages.
publishwithupsert : boolean, optional
Specifies to build events with opcode = Upsert instead of
opcode = Insert.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events. The default behavior is these fields are
interpreted as an integer number of seconds (ESP_DATETIME)
or microseconds (ESP_TIMESTAMP) since epoch.
tvapasswordencrypted : boolean, optional
Specifies that tvapassword is encrypted
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`TervelaPublisher`
'''
connector_key = dict(cls='tva', type='publish')
property_defs = dict(
tvaprimarytmx=prop('tvaprimarytmx', dtype='string', required=True),
tvauserid=prop('tvauserid', dtype='string', required=True),
tvapassword=prop('tvapassword', dtype='string', required=True),
tvatopic=prop('tvatopic', dtype='string', required=True),
tvaclientname=prop('tvaclientname', dtype='string', required=True),
tvasubname=prop('tvasubname', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
tvasecondarytmx=prop('tvasecondarytmx', dtype='string'),
tvalogfile=prop('tvalogfile', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
json=prop('json', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
tvapasswordencrypted=prop('tvapasswordencrypted', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, tvaprimarytmx=None, tvauserid=None,
tvapassword=None, tvatopic=None,
tvaclientname=None, tvasubname=None, urlhostport=None,
name=None, is_active=None,
tvasecondarytmx=None, tvalogfile=None,
configfilesection=None, protofile=None, protomsg=None,
json=None, publishwithupsert=None, dateformat=None,
tvapasswordencrypted=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tva', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tvaprimarytmx',
'tvauserid',
'tvapassword',
'tvatopic',
'tvaclientname',
'tvasubname',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
name=name, is_active=is_active, **properties)
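if __name__ == '__main__':
    # Minimal sketch (fabric endpoint and credentials are illustrative):
    pub = TervelaPublisher(tvaprimarytmx='tmx1.example.com', tvauserid='esp',
                           tvapassword='secret', tvatopic='SAS.EVENTS',
                           tvaclientname='esp_pub', tvasubname='esp_sub',
                           urlhostport='esp-host:55555')
    print(pub.to_xml(pretty=True))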
# Source file: /sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/tervela.py
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class PylonPublisher(Connector):
'''
    Publish frames captured by a Basler GigE camera
Parameters
----------
cameraipaddress : string, optional
Specifies the camera IP address. The default value is the
address of the first camera found on the local subnet.
maxnumframes : int, optional
Specifies the maximum number of frames to publish. The default
value is no maximum.
maxframerate : int, optional
Specifies the maximum number of frames per second to publish.
The default value is the rate at which frames are received
from the camera.
camerafeaturesfile : string, optional
Specifies a Pylon Features Stream (.pfs) configuration file
to load. The default is to use the current camera
configuration unmodified.
camerawidth : int, optional
Specifies the Area-Of-Interest width. The default value is
the value in the current camera configuration.
cameraheight : int, optional
Specifies the Area-Of-Interest height. The default value is
the value in the current camera configuration.
camerapixelformat : string, optional
Specifies the image pixel format. The default value is the
format in the current camera configuration.
camerapacketsize : int, optional
Specifies the Ethernet packet size. The default value is
the value in the current camera configuration.
transactional : string, optional
Sets the event block type to transactional. The default
value is normal.
configfilesection : string, optional
Specifies the name of the section in the connector config
file to parse for configuration parameters. Specify the
value as [configfilesection].
publishwithupsert : boolean, optional
Specifies to build events with opcode=Upsert instead
of opcode=Insert
cameraxoffset : int, optional
Specifies the Area-Of-Interest horizontal offset. The
default value is the value in the current camera configuration.
camerayoffset : int, optional
Specifies the Area-Of-Interest vertical offset. The
default value is the value in the current camera configuration.
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`PylonPublisher`
'''
connector_key = dict(cls='pylon', type='publish')
property_defs = dict(
cameraipaddress=prop('cameraipaddress', dtype='string'),
maxnumframes=prop('maxnumframes', dtype='int'),
maxframerate=prop('maxframerate', dtype='int'),
camerafeaturesfile=prop('camerafeaturesfile', dtype='string'),
camerawidth=prop('camerawidth', dtype='int'),
cameraheight=prop('cameraheight', dtype='int'),
camerapixelformat=prop('camerapixelformat', dtype='string'),
camerapacketsize=prop('camerapacketsize', dtype='int'),
transactional=prop('transactional', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
cameraxoffset=prop('cameraxoffset', dtype='int'),
camerayoffset=prop('camerayoffset', dtype='int'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, name=None, is_active=None,
cameraipaddress=None,
maxnumframes=None, maxframerate=None,
camerafeaturesfile=None, camerawidth=None,
cameraheight=None, camerapixelformat=None,
camerapacketsize=None, transactional=None,
configfilesection=None, publishwithupsert=None,
cameraxoffset=None, camerayoffset=None,
maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'pylon', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
properties = map_properties(cls, properties, delete='type')
return cls(name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/pylon.py
| 0.882428 | 0.36977 |
pylon.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class SMTPSubscriber(Connector):
'''
Subscribe to Simple Mail Transfer Protocol (SMTP) events
Parameters
----------
smtpserver : string
Specifies the SMTP server host name or IP address
sourceaddress : string
Specifies the e-mail address to be used in the “from” field of
the e-mail.
destaddress : string
Specifies the e-mail address to which to send the e-mail message
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
emailperevent : boolean, optional
Specifies true or false. The default is false. If false, each
e-mail body contains a full event block. If true, each e-mail
body contains a single event.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks
received by a subscriber that were introduced by a window
retention policy.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events. The default behavior is these fields are
interpreted as an integer number of seconds (ESP_DATETIME)
or microseconds (ESP_TIMESTAMP) since epoch.
Returns
-------
:class:`SMTPSubscriber`
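Examples
--------
A minimal construction sketch; the server and addresses below are
hypothetical placeholders:
>>> sub = SMTPSubscriber('smtp.example.com', 'esp@example.com',
...                      'ops@example.com', name='smtp_sub',
...                      emailperevent=True)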
'''
connector_key = dict(cls='smtp', type='subscribe')
property_defs = dict(
smtpserver=prop('smtpserver', dtype='string', required=True),
sourceaddress=prop('sourceaddress', dtype='string', required=True),
destaddress=prop('destaddress', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
collapse=prop('collapse', dtype='string'),
emailperevent=prop('emailperevent', dtype='boolean'),
rmretdel=prop('rmretdel', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
dateformat=prop('dateformat', dtype='string')
)
def __init__(self, smtpserver=None, sourceaddress=None, destaddress=None,
name=None, is_active=None, snapshot=None,
collapse=None, emailperevent=None,
rmretdel=None, configfilesection=None, dateformat=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'smtp', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['smtpserver',
'sourceaddress',
'destaddress'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/smtp.py
| 0.763043 | 0.175538 |
smtp.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class DatabaseSubscriber(Connector):
'''
Subscribe to database events
Parameters
----------
connectstring : string
Specifies the database DSN and user credentials in the
format 'DSN=dsn;uid=userid;pwd=password;'
desttablename : string
Specifies the target table name
snapshot : boolean, optional
Specifies whether to send snapshot data
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
commitrows : int, optional
Specifies the minimum number of output rows to buffer
commitsecs : int, optional
Specifies the maximum number of seconds to hold onto an incomplete
commit buffer
ignoresqlerrors : boolean, optional
Enables the connector to continue to write Inserts, Updates, and
Deletes to the database table despite an error in a previous Insert,
Update, or Delete.
maxcolbinding : int, optional
Specifies the maximum supported width of string columns.
The default value is 4096.
pwdencrypted : boolean, optional
Specifies that the pwd field in connectstring is encrypted
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
Returns
-------
:class:`DatabaseSubscriber`
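Examples
--------
A minimal construction sketch; the DSN, credentials, and table name
below are hypothetical placeholders:
>>> sub = DatabaseSubscriber('DSN=mydsn;uid=user;pwd=secret;',
...                          'target_table', name='db_sub',
...                          commitrows=1000)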
'''
connector_key = dict(cls='db', type='subscribe')
property_defs = dict(
connectstring=prop('connectstring', dtype='string', required=True),
desttablename=prop('desttablename', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True),
configfilesection=prop('configfilesection', dtype='string'),
commitrows=prop('commitrows', dtype='int'),
commitsecs=prop('commitsecs', dtype='int'),
ignoresqlerrors=prop('ignoresqlerrors', dtype='boolean'),
maxcolbinding=prop('maxcolbinding', dtype='int'),
pwdencrypted=prop('pwdencrypted', dtype='boolean'),
rmretdel=prop('rmretdel', dtype='boolean')
)
def __init__(self, connectstring=None, desttablename=None, name=None, is_active=None,
snapshot=None, configfilesection=None, commitrows=None,
commitsecs=None, ignoresqlerrors=None, maxcolbinding=None,
pwdencrypted=None, rmretdel=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'db', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['connectstring',
'desttablename'],
delete='type')
return cls(req[0], req[1], name=name, is_active=is_active, **properties)
class DatabasePublisher(Connector):
'''
Publish events from a database
Parameters
----------
connectstring : string
Specifies the database DSN and user credentials in the format
'DSN=dsn;uid=userid;pwd=password;'
blocksize : int, optional
Specifies the number of events to include in a published event
block. The default value is 1.
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
greenplumlogminer : boolean, optional
Enables Greenplum log miner mode
logminerdbname : string, optional
Specifies the gpperfmon database that contains the queries_history
table for Greenplum log miner mode.
logminerschemaowner : string, optional
Specifies the schema owner when using Oracle or Greenplum log
miner mode.
logminerstartdatetime : string, optional
Specifies the start date time when using Oracle or Greenplum
log miner mode. Use the following format: 'dd-mmm-yyyy hh:mm:ss'.
logminertablename : string, optional
Specifies the table name when using Oracle or Greenplum log
miner mode.
maxcolbinding : int, optional
Specifies the maximum supported width of string columns.
The default value is 4096.
maxevents : int, optional
Specifies the maximum number of events to publish.
oraclelogminer : boolean, optional
Enables Oracle log miner mode
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert
pwdencrypted : boolean, optional
Specifies that the pwd field in connectstring is encrypted
selectstatement : string, optional
Specifies the SQL statement to be executed on the source database.
Required when oraclelogminer and greenplumlogminer are not enabled.
transactional : string, optional
Sets the event block type to transactional. The default value is normal.
Returns
-------
:class:`DatabasePublisher`
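Examples
--------
A minimal construction sketch; the DSN, credentials, and query below
are hypothetical placeholders:
>>> pub = DatabasePublisher('DSN=mydsn;uid=user;pwd=secret;',
...                         name='db_pub',
...                         selectstatement='SELECT * FROM src_table')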
'''
connector_key = dict(cls='db', type='publish')
property_defs = dict(
connectstring=prop('connectstring', dtype='string', required=True),
blocksize=prop('blocksize', dtype='int'),
configfilesection=prop('configfilesection', dtype='string'),
greenplumlogminer=prop('greenplumlogminer', dtype='boolean'),
logminerdbname=prop('logminerdbname', dtype='string'),
logminerschemaowner=prop('logminerschemaowner', dtype='string'),
logminerstartdatetime=prop('logminerstartdatetime', dtype='string'),
logminertablename=prop('logminertablename', dtype='string'),
maxcolbinding=prop('maxcolbinding', dtype='int'),
maxevents=prop('maxevents', dtype='int'),
oraclelogminer=prop('oraclelogminer', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
pwdencrypted=prop('pwdencrypted', dtype='boolean'),
selectstatement=prop('selectstatement', dtype='string'),
transactional=prop('transactional', dtype='string')
)
def __init__(self, connectstring=None, name=None, is_active=None, blocksize=None,
configfilesection=None, greenplumlogminer=None,
logminerdbname=None, logminerschemaowner=None,
logminerstartdatetime=None, logminertablename=None,
maxcolbinding=None, maxevents=None, oraclelogminer=None,
publishwithupsert=None, pwdencrypted=None,
selectstatement=None, transactional=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'db', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['connectstring'],
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/database.py
| 0.838448 | 0.173183 |
database.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class MQTTSubscriber(Connector):
'''
Subscribe to MQ Telemetry Transport (MQTT)
Parameters
----------
mqtthost : string
Specifies the MQTT server host name
mqttclientid : string
Specifies the string to use as the MQTT Client ID. If NULL, a random
client ID is generated; in that case mqttdonotcleansession must be false.
Must be unique among all clients connected to the MQTT server.
mqtttopic : string
Specifies the string to use as an MQTT topic to publish events to.
mqttqos : string
Specifies the requested Quality of Service. Values can be 0, 1 or 2.
mqttmsgtype : string
Specifies binary, CSV, JSON, or the name of a string field in the
subscribed window schema.
snapshot : boolean, optional
Specifies whether to send snapshot data
mqttuserid : string, optional
Specifies the user name required to authenticate the connector's
session with the MQTT server.
mqttpassword : string, optional
Specifies the password associated with mqttuserid
mqttport : int, optional
Specifies the MQTT server port. Default is 1883.
mqttretainmsg : boolean, optional
Sets to true to make the published message retained in the MQTT
Server. Default is false.
mqttdonotcleansession : boolean, optional
Instructs the MQTT Server to keep all messages and subscriptions on
disconnect, instead of discarding them. Default is false.
mqttkeepaliveinterval : int, optional
Specifies the number of seconds after which the broker should send a
PING message to the client if no other messages have been exchanged
in that time. Default is 10.
mqttmsgmaxdeliveryattempts : int, optional
Specifies the number of times the connector tries to resend the
message in case of failure. Default is 20.
mqttmsgdelaydeliveryattempts : int, optional
Specifies the delay in milliseconds between delivery attempts
specified with mqttmsgmaxdeliveryattempts. Default is 500.
mqttmsgwaitbeforeretry : int, optional
Specifies the number of seconds to wait before retrying to send
messages to the MQTT broker. This applies to publish messages
with QoS > 0. Default is 20.
mqttmaxinflightmsg : int, optional
Specifies the number of QoS 1 and 2 messages that can be simultaneously
in flight. Default is 20.
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber output
publishable. The default value is disabled.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received by
a subscriber that were introduced by a window retention policy.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted as
an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition used to convert event blocks to protobuf messages.
When you specify this parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the .proto
file that you specified with the protofile parameter. Event blocks
are converted into this message.
mqttssl : boolean, optional
Specifies to use SSL/TLS to connect to the MQTT broker. Default is
false. In order to use SSL/TLS, the ESP encryption overlay
must be installed.
mqttsslcafile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM encoded
trusted CA certificate files. Either mqttsslcafile or mqttsslcapath
must be specified.
mqttsslcapath : string, optional
If mqttssl=true, specifies the path to a directory containing the
PEM encoded trusted CA certificate files. See mosquitto.conf for
more details about configuring this directory. Either mqttsslcafile
or mqttsslcapath must be specified.
mqttsslcertfile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM
encoded certificate file for this client. Both mqttsslcertfile and
mqttsslkeyfile must be provided if one of them is.
mqttsslkeyfile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM
encoded private key for this client. Both mqttsslcertfile and
mqttsslkeyfile must be provided if one of them is.
mqttsslpassword : string, optional
If mqttssl=true, and if key file is encrypted, specifies the
password for decryption.
csvincludeschema : string, optional
Specifies "never", "once", or "pereventblock". The default value is
"never". When mqttmsgtype = CSV, prepend output CSV with the window's
serialized schema.
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
mqttpasswordencrypted : boolean, optional
Specifies that mqttpassword is encrypted
addcsvopcode : boolean, optional
Prepends an opcode and comma to input CSV events. The opcode is Insert
unless publishwithupsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV events (with a comma).
Valid values are "normal" and "partialupdate".
csvmsgperevent : int, optional
For CSV, specifies to send one message per event. The default is one
message per transactional event block or else one message per event.
csvmsgpereventblock : int, optional
For CSV, specifies to send one message per event block. The default
is one message per transactional event block or else one message per event.
Returns
-------
:class:`MQTTSubscriber`
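Examples
--------
A minimal construction sketch; the host, client ID, and topic below
are hypothetical placeholders:
>>> sub = MQTTSubscriber('mqtt.example.com', 'esp-client-1',
...                      'esp/out', '1', 'CSV', name='mqtt_sub')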
'''
connector_key = dict(cls='mqtt', type='subscribe')
property_defs = dict(
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
mqtthost=prop('mqtthost', dtype='string', required=True),
mqttclientid=prop('mqttclientid', dtype='string', required=True),
mqtttopic=prop('mqtttopic', dtype='string', required=True),
mqttqos=prop('mqttqos', dtype='string', required=True),
mqttmsgtype=prop('mqttmsgtype', dtype='string', required=True),
mqttuserid=prop('mqttuserid', dtype='string'),
mqttpassword=prop('mqttpassword', dtype='string'),
mqttport=prop('mqttport', dtype='int'),
mqttretainmsg=prop('mqttretainmsg', dtype='boolean'),
mqttdonotcleansession=prop('mqttdonotcleansession', dtype='boolean'),
mqttkeepaliveinterval=prop('mqttkeepaliveinterval', dtype='int'),
mqttmsgmaxdeliveryattempts=prop('mqttmsgmaxdeliveryattempts', dtype='int'),
mqttmsgdelaydeliveryattempts=prop('mqttmsgdelaydeliveryattempts', dtype='int'),
mqttmsgwaitbeforeretry=prop('mqttmsgwaitbeforeretry', dtype='int'),
mqttmaxinflightmsg=prop('mqttmaxinflightmsg', dtype='int'),
collapse=prop('collapse', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
mqttssl=prop('mqttssl', dtype='boolean'),
mqttsslcafile=prop('mqttsslcafile', dtype='string'),
mqttsslcapath=prop('mqttsslcapath', dtype='string'),
mqttsslcertfile=prop('mqttsslcertfile', dtype='string'),
mqttsslkeyfile=prop('mqttsslkeyfile', dtype='string'),
mqttsslpassword=prop('mqttsslpassword', dtype='string'),
csvincludeschema=prop('csvincludeschema', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
mqttpasswordencrypted=prop('mqttpasswordencrypted', dtype='boolean'),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
addcsvflags=prop('addcsvflags', dtype='string'),
csvmsgperevent=prop('csvmsgperevent', dtype='int'),
csvmsgpereventblock=prop('csvmsgpereventblock', dtype='int')
)
def __init__(self, mqtthost=None, mqttclientid=None, mqtttopic=None,
mqttqos=None, mqttmsgtype=None,
name=None, is_active=None, snapshot=None,
mqttuserid=None, mqttpassword=None, mqttport=None,
mqttretainmsg=None, mqttdonotcleansession=None,
mqttkeepaliveinterval=None, mqttmsgmaxdeliveryattempts=None,
mqttmsgdelaydeliveryattempts=None, mqttmsgwaitbeforeretry=None,
mqttmaxinflightmsg=None, collapse=None, rmretdel=None,
dateformat=None, protofile=None, protomsg=None, mqttssl=None,
mqttsslcafile=None, mqttsslcapath=None, mqttsslcertfile=None,
mqttsslkeyfile=None, mqttsslpassword=None, csvincludeschema=None,
configfilesection=None, mqttpasswordencrypted=None,
addcsvopcode=None, addcsvflags=None, csvmsgperevent=None,
csvmsgpereventblock=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'mqtt', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['mqtthost', 'mqttclientid',
'mqtttopic', 'mqttqos',
'mqttmsgtype'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4],
name=name, is_active=is_active, **properties)
class MQTTPublisher(Connector):
'''
Publish MQ Telemetry Transport (MQTT) events
Parameters
----------
mqtthost : string
Specifies the MQTT server host name
mqttclientid : string
Specifies the string to use as the MQTT Client ID. If NULL, a random
client ID is generated; in that case mqttcleansession must be true.
Must be unique among all clients connected to the MQTT server.
mqtttopic : string
Specifies the string to use as an MQTT subscription topic pattern
mqttqos : string
Specifies the requested Quality of Service. Values can be 0, 1 or 2.
mqttmsgtype : string
Specifies binary, CSV, JSON, or opaquestring
mqttuserid : string, optional
Specifies the user name required to authenticate the connector’s
session with the MQTT server.
mqttpassword : string, optional
Specifies the password associated with mqttuserid
mqttport : int, optional
Specifies the MQTT server port. Default is 1883.
mqttacceptretainedmsg : boolean, optional
Set to true to accept retained messages. Default is false.
mqttcleansession : boolean, optional
Set to true to instruct the MQTT Server to clean all messages and
subscriptions on disconnect, false to instruct it to keep them.
Default is true.
mqttkeepaliveinterval : int, optional
Specifies the number of seconds after which the broker should send
a PING message to the client if no other messages have been
exchanged in that time. Default is 10.
publishwithupsert : boolean, optional
Builds events with opcode=Upsert instead of Insert
transactional : string, optional
When mqttmsgtype=CSV, sets the event block type to transactional.
The default value is normal.
blocksize : int, optional
When mqttmsgtype=CSV, specifies the number of events to include in
a published event block. The default value is 1.
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be parsed,
the event is dropped, an error is logged, and publishing continues.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV events.
The default delimiter is the , character.
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
automatically generated by the Source window.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted as
an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition used to convert event blocks to protobuf messages.
When you specify this parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the .proto
file that you specified with the protofile parameter. Event blocks
are converted into this message.
mqttssl : boolean, optional
Specifies to use SSL/TLS to connect to the MQTT broker. Default is
false. In order to use SSL/TLS, the ESP encryption overlay must
be installed.
mqttsslcafile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM
encoded trusted CA certificate files. Either mqttsslcafile or
mqttsslcapath must be specified.
mqttsslcapath : string, optional
If mqttssl=true, specifies the path to a directory containing the
PEM encoded trusted CA certificate files. See mosquitto.conf for
more details about configuring this directory. Either mqttsslcafile
or mqttsslcapath must be specified.
mqttsslcertfile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM
encoded certificate file for this client. Both mqttsslcertfile and
mqttsslkeyfile must be provided if one of them is.
mqttsslkeyfile : string, optional
If mqttssl=true, specifies the path to a file containing the PEM
encoded private key for this client. Both mqttsslcertfile and
mqttsslkeyfile must be provided if one of them is.
mqttsslpassword : string, optional
If mqttssl=true, and if key file is encrypted, specifies the
password for decryption.
addcsvopcode : boolean, optional
Prepends an opcode and comma to input CSV events. The opcode is Insert
unless publishwithupsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV events (with a comma).
Valid values are "normal" and "partialupdate".
configfilesection : string, optional
Specifies the name of the section in the config file to parse for
configuration parameters. Specify the value as [configfilesection].
mqttpasswordencrypted : boolean, optional
Specifies that mqttpassword is encrypted
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`MQTTPublisher`
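Examples
--------
A minimal construction sketch; the host, client ID, and topic below
are hypothetical placeholders:
>>> pub = MQTTPublisher('mqtt.example.com', 'esp-client-2',
...                     'esp/in', '1', 'CSV', name='mqtt_pub')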
'''
connector_key = dict(cls='mqtt', type='publish')
property_defs = dict(
mqtthost=prop('mqtthost', dtype='string', required=True),
mqttclientid=prop('mqttclientid', dtype='string', required=True),
mqtttopic=prop('mqtttopic', dtype='string', required=True),
mqttqos=prop('mqttqos', dtype='string', required=True),
mqttmsgtype=prop('mqttmsgtype', dtype='string', required=True),
mqttuserid=prop('mqttuserid', dtype='string'),
mqttpassword=prop('mqttpassword', dtype='string'),
mqttport=prop('mqttport', dtype='int'),
mqttacceptretainedmsg=prop('mqttacceptretainedmsg', dtype='boolean'),
mqttcleansession=prop('mqttcleansession', dtype='boolean'),
mqttkeepaliveinterval=prop('mqttkeepaliveinterval', dtype='int'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
transactional=prop('transactional', dtype='string'),
blocksize=prop('blocksize', dtype='int'),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
mqttssl=prop('mqttssl', dtype='boolean'),
mqttsslcafile=prop('mqttsslcafile', dtype='string'),
mqttsslcapath=prop('mqttsslcapath', dtype='string'),
mqttsslcertfile=prop('mqttsslcertfile', dtype='string'),
mqttsslkeyfile=prop('mqttsslkeyfile', dtype='string'),
mqttsslpassword=prop('mqttsslpassword', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
addcsvflags=prop('addcsvflags', dtype='string'),
mqttpasswordencrypted=prop('mqttpasswordencrypted', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, mqtthost=None, mqttclientid=None, mqtttopic=None,
mqttqos=None, mqttmsgtype=None, name=None, is_active=None,
mqttuserid=None, mqttpassword=None, mqttport=None,
mqttacceptretainedmsg=None, mqttcleansession=None,
mqttkeepaliveinterval=None, publishwithupsert=None,
transactional=None, blocksize=None, ignorecsvparseerrors=None,
csvfielddelimiter=None, noautogenfield=None,
dateformat=None, protofile=None, protomsg=None,
mqttssl=None, mqttsslcafile=None, mqttsslcapath=None,
mqttsslcertfile=None, mqttsslkeyfile=None,
mqttsslpassword=None, configfilesection=None,
addcsvopcode=None, addcsvflags=None,
mqttpasswordencrypted=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'mqtt', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['mqtthost', 'mqttclientid',
'mqtttopic', 'mqttqos',
'mqttmsgtype'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4],
name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/mqtt.py
| 0.776453 | 0.176352 |
mqtt.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class FilePublisher(Connector):
'''
Publish events from a file
Parameters
----------
fsname : string
The filename or path of the file
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
addcsvflags : string, optional
Specifies the event type to insert into input CSV events.
Valid values: 'normal' or 'partialupdate'
addcsvopcode : string, optional
Prepends an opcode and comma to input CSV events.
blocksize : int, optional
Specifies the number of events to include in a published event block
cefsyslogprefix : string, optional
When fstype=cef, specifies that CEF events contain the syslog prefix
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV events
dateformat : string, optional
Specifies the format of datetime and timestamp fields
growinginputfile : boolean, optional
Enables reading from a growing input file by publishers
header : int, optional
Specifies the number of input lines to skip before starting
publish operations.
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be parsed,
the event is dropped, an error is logged, and publishing continues.
maxevents : int, optional
Specifies the maximum number of events to publish
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
autogenerated by the source window.
prebuffer : boolean, optional
Controls whether event blocks are buffered to an event block vector
before doing any injects.
publishwithupsert : boolean, optional
Build events with opcode=upsert instead of insert
rate : int, optional
Specifies the requested transmit rate in events per second
repeatcount : int, optional
Specifies the number of times to repeat the publish operation
transactional : string, optional
Sets the event block type to transactional.
Returns
-------
:class:`FilePublisher`
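Examples
--------
A minimal construction sketch; the file name below is a hypothetical
placeholder:
>>> pub = FilePublisher('input.csv', fstype='csv',
...                     name='file_pub', blocksize=10)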
'''
connector_key = dict(cls='fs', type='publish')
property_defs = dict(
fsname=prop('fsname', dtype='string', required=True),
fstype=prop('fstype', dtype='string', required=True,
default='csv',
valid_values=['binary', 'csv', 'xml', 'json',
'syslog', 'hdat', 'cef']),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
addcsvflags=prop('addcsvflags', dtype='string',
                 valid_values=['normal', 'partialupdate']),
addcsvopcode=prop('addcsvopcode', dtype='string',
valid_values=['insert', '']),
blocksize=prop('blocksize', dtype='int', valid_expr='value > 0'),
cefsyslogprefix=prop('cefsyslogprefix', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
growinginputfile=prop('growinginputfile', dtype='boolean'),
header=prop('header', dtype=('boolean', 'string'),
valid_values=[True, False, 'full']),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
maxevents=prop('maxevents', dtype='int', valid_expr='value >= 0'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
prebuffer=prop('prebuffer', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
rate=prop('rate', dtype='int'),
repeatcount=prop('repeatcount', dtype='int', valid_expr='value >= 0'),
transactional=prop('transactional', dtype='string'),
)
def __init__(self, fsname=None, fstype=None, name=None, is_active=None,
snapshot=None, addcsvflags=None,
addcsvopcode=None, blocksize=None, cefsyslogprefix=None,
configfilesection=None, csvfielddelimiter=None,
dateformat=None,
growinginputfile=None, header=None, ignorecsvparseerrors=None,
maxevents=None, noautogenfield=None, prebuffer=None,
publishwithupsert=None, rate=None, repeatcount=None,
transactional=None):
params = dict(**locals())
params.pop('self')
params.pop('is_active')
name = params.pop('name')
Connector.__init__(self, 'fs', name=name, type='publish', is_active=is_active,
properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
class FileSubscriber(Connector):
'''
Subscribe to events from a file
Parameters
----------
fsname : string
The filename or path of the file
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : boolean, optional
Converts UPDATE_BLOCK events to UPDATE events in order to make
subscriber output publishable.
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
dateformat : string, optional
Specifies the format of datetime and timestamp fields
hdatcashostport : string, optional
Specifies the CAS server host and port
hdatcaspassword : string, optional
Specifies the CAS server password
hdatcasusername : string, optional
Specifies the CAS server user name
hdatfilename : string, optional
Specifies the name of the Objective Analysis Package Data (HDAT)
file to be written to the Hadoop Distributed File System (HDFS).
hdatlasrhostport : string, optional
Specifies the SAS LASR Analytic Server host and port
hdatlasrkey : string, optional
Specifies the path to tklasrkey.sh
hdatmaxdatanodes : int, optional
Specifies the maximum number of data node connections
hdatmaxstringlength : int, optional
Specifies in bytes the fixed size of string fields in Objective
Analysis Package Data (HDAT) files
hdatnumthreads : int, optional
Specifies the size of the thread pool used for multi-threaded
writes to data node socket connections.
hdfsblocksize : int, optional
Specifies in Mbytes the block size used to write an Objective
Analysis Package Data (HDAT) file.
hdfsnumreplicas : int, optional
Specifies the number of Hadoop Distributed File System (HDFS)
replicas created with writing an Objective Analysis Package
Data (HDAT) file.
header : boolean or string, optional
For a CSV subscriber, specifies to write a header row that
shows comma-separated fields.
Valid values: True, False, or 'full' (include opcode flags in header)
maxfilesize : int, optional
Specifies the maximum size in bytes of the subscriber output file
periodicity : int, optional
Specifies the interval in seconds at which the subscriber output
file is closed and a new output file opened.
rate : boolean, optional
When latency mode is enabled, shows this specified rate in generated
output files.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
unbufferedoutputstreams : boolean, optional
Specifies to create an unbuffered stream when writing to a
file or socket.
Returns
-------
:class:`FileSubscriber`
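Examples
--------
A minimal construction sketch; the file name below is a hypothetical
placeholder:
>>> sub = FileSubscriber('output.csv', fstype='csv',
...                      name='file_sub', header=True)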
'''
connector_key = dict(cls='fs', type='subscribe')
property_defs = dict(
fsname=prop('fsname', dtype='string', required=True),
fstype=prop('fstype', dtype='string', required=True,
default='csv',
valid_values=['binary', 'csv', 'xml', 'json',
'syslog', 'hdat', 'cef']),
snapshot=prop('snapshot', dtype='bool', required=True, default=False),
collapse=prop('collapse', dtype='bool'),
configfilesection=prop('configfilesection', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
hdatcashostport=prop('hdatcashostport', dtype='string',
valid_values=re.compile(r'\w[\w\-\.]*:\d+')),
hdatcaspassword=prop('hdatcaspassword', dtype='string'),
hdatcasusername=prop('hdatcasusername', dtype='string'),
hdatfilename=prop('hdatfilename', dtype='string'),
hdatlasrhostport=prop('hdatlasrhostport', dtype='string'),
hdatlasrkey=prop('hdatlasrkey', dtype='string'),
hdatmaxdatanodes=prop('hdatmaxdatanodes',
dtype='int', valid_expr='value > 0'),
hdatmaxstringlength=prop('hdatmaxstringlength', dtype='int',
valid_expr='value > 0'),
hdatnumthreads=prop('hdatnumthreads', dtype='int',
valid_expr='value >= 0'),
hdfsblocksize=prop('hdfsblocksize', dtype='int',
valid_expr='value >= 0'),
hdfsnumreplicas=prop('hdfsnumreplicas', dtype='int',
valid_expr='value >= 0'),
header=prop('header', dtype=('bool', 'string'),
valid_values=[True, False, 'full']),
maxfilesize=prop('maxfilesize', dtype='int',
valid_expr='value >= 0'),
periodicity=prop('periodicity', dtype='int',
valid_expr='value >= 0'),
rate=prop('rate', dtype='bool'),
rmretdel=prop('rmretdel', dtype='bool'),
unbufferedoutputstreams=prop('unbufferedoutputstreams', dtype='bool'),
)
def __init__(self, fsname=None, fstype=None, name=None, is_active=None,
snapshot=None, collapse=None, configfilesection=None,
dateformat=None, hdatcashostport=None,
hdatcaspassword=None, hdatcasusername=None,
hdatfilename=None, hdatlasrhostport=None, hdatlasrkey=None,
hdatmaxdatanodes=None, hdatmaxstringlength=None,
hdatnumthreads=None, hdfsblocksize=None,
hdfsnumreplicas=None, header=None, maxfilesize=None,
periodicity=None, rate=None, rmretdel=None,
unbufferedoutputstreams=None):
params = dict(**locals())
params.pop('self')
params.pop('is_active')
name = params.pop('name')
Connector.__init__(self, 'fs', name=name, type='subscribe', is_active=is_active,
properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
class SocketPublisher(FilePublisher):
'''
Publish events from a socket
Parameters
----------
port : int
The port number to publish on
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
addcsvflags : string, optional
Specifies the event type to insert into input CSV events.
Valid values: 'normal' or 'partialupdate'
addcsvopcode : string, optional
Prepends an opcode and comma to input CSV events.
blocksize : int, optional
Specifies the number of events to include in a published event block
cefsyslogprefix : string, optional
When fstype=cef, specifies that CEF events contain the syslog prefix
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV events
dateformat : string, optional
Specifies the format of datetime and timestamp fields
growinginputfile : boolean, optional
Enables reading from a growing input file by publishers
header : int, optional
Specifies the number of input lines to skip before starting
publish operations.
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be parsed,
the event is dropped, an error is logged, and publishing continues.
maxevents : int, optional
Specifies the maximum number of events to publish
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
autogenerated by the source window.
prebuffer : boolean, optional
Controls whether event blocks are buffered to an event block vector
before doing any injects.
publishwithupsert : boolean, optional
Build events with opcode=upsert instead of insert
rate : int, optional
Specifies the requested transmit rate in events per second
repeatcount : int, optional
Specifies the number of times to repeat the publish operation
transactional : string, optional
Sets the event block type to transactional.
Returns
-------
:class:`SocketPublisher`
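Examples
--------
A minimal construction sketch; the port below is a hypothetical
placeholder (it is rewritten internally to the ':port' fsname form):
>>> pub = SocketPublisher(port=5555, fstype='csv', name='sock_pub')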
'''
connector_key = dict(cls='fs', type='publish', fsname=re.compile(r':(@\w+@|\d+)$'))
def __init__(self, port=None, fstype=None, name=None, is_active=None,
snapshot=None, addcsvflags=None,
addcsvopcode=None, blocksize=None, cefsyslogprefix=None,
configfilesection=None, csvfielddelimiter=None,
dateformat=None,
growinginputfile=None, header=None, ignorecsvparseerrors=None,
maxevents=None, noautogenfield=None, prebuffer=None,
publishwithupsert=None, rate=None, repeatcount=None,
transactional=None):
params = dict(locals())  # keep is_active so FilePublisher.__init__ receives it
if isinstance(port, six.string_types):
port = port.replace(':', '')
if port is not None:
params['fsname'] = ':%s' % int(port)
params.pop('port', None)
FilePublisher.__init__(**params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
class SocketSubscriber(FileSubscriber):
'''
Subscribe to events from a socket
Parameters
----------
host : string
The host name where the socket exists
port : int
The port number on the server
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : boolean, optional
Converts UPDATE_BLOCK events to UPDATE events in order to make
subscriber output publishable.
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
dateformat : string, optional
Specifies the format of datetime and timestamp fields
hdatcashostport : string, optional
Specifies the CAS server host and port
hdatcaspassword : string, optional
Specifies the CAS server password
hdatcasusername : string, optional
Specifies the CAS server user name
hdatfilename : string, optional
Specifies the name of the Objective Analysis Package Data (HDAT)
file to be written to the Hadoop Distributed File System (HDFS).
hdatlasrhostport : string, optional
Specifies the SAS LASR Analytic Server host and port
hdatlasrkey : string, optional
Specifies the path to tklasrkey.sh
hdatmaxdatanodes : int, optional
Specifies the maximum number of data node connections
hdatmaxstringlength : int, optional
Specifies in bytes the fixed size of string fields in Objective
Analysis Package Data (HDAT) files
hdatnumthreads : int, optional
Specifies the size of the thread pool used for multi-threaded
writes to data node socket connections.
hdfsblocksize : int, optional
Specifies in Mbytes the block size used to write an Objective
Analysis Package Data (HDAT) file.
hdfsnumreplicas : int, optional
Specifies the number of Hadoop Distributed File System (HDFS)
replicas created with writing an Objective Analysis Package
Data (HDAT) file.
header : boolean or string, optional
For a CSV subscriber, specifies to write a header row that
shows comma-separated fields.
Valid values: True, False, or 'full' (include opcode flags in header)
maxfilesize : int, optional
Specifies the maximum size in bytes of the subscriber output file
periodicity : int, optional
Specifies the interval in seconds at which the subscriber output
file is closed and a new output file opened.
rate : boolean, optional
When latency mode is enabled, shows this specified rate in generated
output files.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
unbufferedoutputstreams : boolean, optional
Specifies to create an unbuffered stream when writing to a
file or socket.
Returns
-------
:class:`SocketSubscriber`
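Examples
--------
A minimal construction sketch; the host and port below are
hypothetical placeholders (they are combined into a 'host:port'
fsname internally):
>>> sub = SocketSubscriber('localhost', 5556, fstype='csv',
...                        name='sock_sub')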
'''
connector_key = dict(cls='fs', type='subscribe',
fsname=re.compile(r'^(@\w+@|[\w+\.\-]+):(@\w+@|\d+)$'))
def __init__(self, host=None, port=None, fstype=None, name=None, is_active=None,
snapshot=None, collapse=None, configfilesection=None,
dateformat=None, hdatcashostport=None,
hdatcaspassword=None, hdatcasusername=None,
hdatfilename=None, hdatlasrhostport=None, hdatlasrkey=None,
hdatmaxdatanodes=None, hdatmaxstringlength=None,
hdatnumthreads=None, hdfsblocksize=None, hdfsnumreplicas=None,
header=None, maxfilesize=None, periodicity=None,
rate=None, rmretdel=None, unbufferedoutputstreams=None):
params = dict(locals())  # keep is_active so FileSubscriber.__init__ receives it
if host is not None and port is not None:
params['fsname'] = '%s:%s' % (host, int(port))
params.pop('host', None)
params.pop('port', None)
FileSubscriber.__init__(**params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
# Guard against a missing fsname so host and port are always defined
host = port = None
if req[0] is not None:
    host, port = req[0].split(':', 1)
return cls(host, port, name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/fs.py
| 0.771241 | 0.296424 |
fs.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class TibcoSubscriber(Connector):
'''
Subscribe to Tibco Rendezvous (RV) events
Parameters
----------
tibrvsubject : string
Specifies the Tibco RV subject name
tibrvtype : string
Specifies binary, CSV, JSON, or the name of a string field in
the subscribed window schema.
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
tibrvservice : string, optional
Specifies the Rendezvous service used by the Tibco RV transport
created by the connector. The default service name is “rendezvous”.
tibrvnetwork : string, optional
Specifies the network interface used by the Tibco RV transport
created by the connector. The default network depends on the
type of daemon used by the connector.
tibrvdaemon : string, optional
Specifies the Rendezvous daemon used by the connector. The
default is the default socket created by the local daemon.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted
as an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition. This definition is used to convert event
blocks to protobuf messages. When you specify this parameter,
you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
csvmsgperevent : int, optional
For CSV, specifies to send one message per event. The default is
one message per transactional event block or else one message
per event.
csvmsgpereventblock : int, optional
For CSV, specifies to send one message per event block. The
default is one message per transactional event block or else one
message per event.
Returns
-------
:class:`TibcoSubscriber`
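Examples
--------
A minimal construction sketch; the subject name below is a
hypothetical placeholder:
>>> sub = TibcoSubscriber('esp.events', 'CSV', name='tibrv_sub')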
'''
connector_key = dict(cls='tibrv', type='subscribe')
property_defs = dict(
tibrvsubject=prop('tibrvsubject', dtype='string', required=True),
tibrvtype=prop('tibrvtype', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
collapse=prop('collapse', dtype='string'),
tibrvservice=prop('tibrvservice', dtype='string'),
tibrvnetwork=prop('tibrvnetwork', dtype='string'),
tibrvdaemon=prop('tibrvdaemon', dtype='string'),
rmretdel=prop('rmretdel', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
csvmsgperevent=prop('csvmsgperevent', dtype='int'),
csvmsgpereventblock=prop('csvmsgpereventblock', dtype='int')
)
def __init__(self, tibrvsubject=None, tibrvtype=None,
name=None, is_active=None,
snapshot=None, collapse=None, tibrvservice=None,
tibrvnetwork=None, tibrvdaemon=None, rmretdel=None,
dateformat=None, configfilesection=None,
protofile=None, protomsg=None, csvmsgperevent=None,
csvmsgpereventblock=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tibrv', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tibrvsubject',
'tibrvtype'],
delete='type')
return cls(req[0], req[1], name=name, is_active=is_active, **properties)
class TibcoPublisher(Connector):
'''
Publish Tibco Rendezvous (RV) events
Parameters
----------
tibrvsubject : string
Specifies the Tibco RV subject name
tibrvtype : string
Specifies binary, CSV, JSON, or opaquestring. For opaquestring,
the Source window schema is assumed to be "index:int64,message:string".
blocksize : int, optional
Specifies the number of events to include in a published
event block. The default value is 1.
transactional : string, optional
Sets the event block type to transactional. The default value
is normal.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events. The default behavior is these fields are
interpreted as an integer number of seconds (ESP_DATETIME)
or microseconds (ESP_TIMESTAMP) since epoch.
tibrvservice : string, optional
Specifies the Rendezvous service used by the Tibco RV
transport created by the connector. The default service
name is “rendezvous”.
tibrvnetwork : string, optional
Specifies the network interface used by the Tibco RV transport
created by the connector. The default network depends
on the type of daemon used by the connector.
tibrvdaemon : string, optional
Specifies the Rendezvous daemon used by the connector. The
default is the default socket created by the local daemon.
configfilesection : string, optional
Specifies the name of the section in the connector config
file to parse for configuration parameters. Specify the
value as [configfilesection].
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be
parsed, the event is dropped, an error is logged, and
publishing continues.
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition. This definition is used to
convert event blocks to protobuf messages. When you specify
this parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in
the .proto file that you specified with the protofile
parameter. Event blocks are converted into this message.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input
CSV events. The default delimiter is the , character.
noautogenfield : boolean, optional
Specifies that input events are missing the key field that
is autogenerated by the source window.
publishwithupsert : boolean, optional
Specifies to build events with opcode = Upsert instead of Insert.
addcsvopcode : boolean, optional
Prepends an opcode and comma to input CSV events. The
opcode is Insert unless publishwithupsert is enabled.
addcsvflags : string, optional
Specifies the event type to insert into input CSV events
(with a comma). Valid values are "normal" and "partialupdate".
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`TibcoPublisher`
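Examples
--------
A minimal construction sketch; the subject name below is a
hypothetical placeholder:
>>> pub = TibcoPublisher('esp.events', 'CSV', name='tibrv_pub',
...                      blocksize=5)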
'''
connector_key = dict(cls='tibrv', type='publish')
property_defs = dict(
tibrvsubject=prop('tibrvsubject', dtype='string', required=True),
tibrvtype=prop('tibrvtype', dtype='string', required=True),
blocksize=prop('blocksize', dtype='int'),
transactional=prop('transactional', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
tibrvservice=prop('tibrvservice', dtype='string'),
tibrvnetwork=prop('tibrvnetwork', dtype='string'),
tibrvdaemon=prop('tibrvdaemon', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
addcsvopcode=prop('addcsvopcode', dtype='boolean'),
addcsvflags=prop('addcsvflags', dtype='string'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, tibrvsubject=None, tibrvtype=None,
name=None, is_active=None,
blocksize=None, transactional=None, dateformat=None,
tibrvservice=None, tibrvnetwork=None, tibrvdaemon=None,
configfilesection=None, ignorecsvparseerrors=None,
protofile=None, protomsg=None, csvfielddelimiter=None,
noautogenfield=None, publishwithupsert=None,
addcsvopcode=None, addcsvflags=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tibrv', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tibrvsubject',
'tibrvtype'],
delete='type')
return cls(req[0], req[1], name=name, is_active=is_active, **properties)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/tibco.py
| 0.826327 | 0.221056 |
tibco.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class TeradataSubscriber(Connector):
'''
Subscribe to Teradata operations
Parameters
----------
tdatatdpid : string
Specifies the target Teradata server name
desttablename : string
Specifies the target table name
tdatausername : string
Specifies the user name for the user account on the target
Teradata server.
tdatauserpwd : string
Specifies the user password for the user account on the target
Teradata server.
tdatamaxsessions : int
Specifies the maximum number of sessions created by the TPT to
the Teradata server.
tdataminsessions : int
Specifies the minimum number of sessions created by the TPT to
the Teradata server.
tdatadriver : string
Specifies the operator: stream, update, or load.
tdatainsertonly : boolean
Specifies whether events in the subscriber event stream processing
window are insert only. Must be true when using the load operator.
snapshot : boolean
Specifies whether to send snapshot data
rmretdel : boolean, optional
Removes all delete events from event blocks received by the
subscriber that were introduced by a window retention policy.
tdatabatchperiod : int, optional
Specifies the batch period in seconds. Required when using the
update operator, otherwise ignored.
stage1tablename : string, optional
Specifies the first staging table. Required when using the load
operator, otherwise ignored.
stage2tablename : string, optional
Specifies the second staging table. Required when using the load
operator, otherwise ignored.
connectstring : string, optional
Specifies the connect string used to access the target and
staging tables. Use the form “DSN=dsnname;UID=userid;pwd=password”.
Required when using the load operator, otherwise ignored.
tdatatracelevel : int, optional
Specifies the trace level for Teradata messages written to the
trace file in the current working directory.
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
tdatauserpwdencrypted : boolean, optional
Specifies that tdatauserpwd is encrypted
Returns
-------
:class:`TeradataSubscriber`
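Examples
--------
A minimal construction sketch; the server, table, and credentials
below are hypothetical placeholders:
>>> sub = TeradataSubscriber('tdserver1', 'target_table', 'tduser',
...                          'tdpass', tdataminsessions=1,
...                          tdatamaxsessions=4, tdatadriver='stream',
...                          tdatainsertonly=False, name='tdata_sub')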
'''
connector_key = dict(cls='tdata', type='subscribe')
property_defs = dict(
tdatatdpid=prop('tdatatdpid', dtype='string', required=True),
desttablename=prop('desttablename', dtype='string', required=True),
tdatausername=prop('tdatausername', dtype='string', required=True),
tdatauserpwd=prop('tdatauserpwd', dtype='string', required=True),
tdatamaxsessions=prop('tdatamaxsessions', dtype='int', required=True),
tdataminsessions=prop('tdataminsessions', dtype='int', required=True),
tdatadriver=prop('tdatadriver', dtype='string', required=True),
tdatainsertonly=prop('tdatainsertonly', dtype='boolean', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
rmretdel=prop('rmretdel', dtype='boolean'),
tdatabatchperiod=prop('tdatabatchperiod', dtype='int'),
stage1tablename=prop('stage1tablename', dtype='string'),
stage2tablename=prop('stage2tablename', dtype='string'),
connectstring=prop('connectstring', dtype='string'),
tdatatracelevel=prop('tdatatracelevel', dtype='int'),
configfilesection=prop('configfilesection', dtype='string'),
tdatauserpwdencrypted=prop('tdatauserpwdencrypted', dtype='boolean')
)
def __init__(self, tdatatdpid=None, desttablename=None, tdatausername=None,
tdatauserpwd=None, tdataminsessions=None, tdatamaxsessions=None,
tdatadriver=None, tdatainsertonly=None,
name=None, is_active=None, snapshot=None,
rmretdel=None, tdatabatchperiod=None, stage1tablename=None,
stage2tablename=None, connectstring=None, tdatatracelevel=None,
configfilesection=None, tdatauserpwdencrypted=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tdata', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tdatatdpid',
'desttablename',
'tdatausername',
'tdatauserpwd',
'tdataminsessions',
'tdatamaxsessions',
'tdatadriver',
'tdatainsertonly'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
req[7], name=name, is_active=is_active, **properties)
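# Usage sketch (not part of the original module; the server name and
# credentials below are hypothetical placeholders):
#
#   sub = TeradataSubscriber(tdatatdpid='tdserv', desttablename='TARGET',
#                            tdatausername='user', tdatauserpwd='secret',
#                            tdataminsessions=1, tdatamaxsessions=4,
#                            tdatadriver='stream', tdatainsertonly=False,
#                            snapshot=False)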
class TeradataListenerSubscriber(Connector):
'''
Subscribe to Teradata Listener events
Parameters
----------
ingestUrl : string
Specifies the URL for the Listener Ingest REST API (version 1)
    SSLCert : string
        Specifies the path to a file that contains SSL certificates used to
        securely connect to the Listener Ingest service. Listener uses TLS 1.2.
sourceKey : string
Specifies the Listener source secret key that identifies the
Listener source feed to which Event Stream Processing sends data.
ingestBlocksize : int, optional
Specifies the maximum number of data rows to send in one Listener
Ingest message. Matching the connector block size to the Source
window block size is recommended. The default block size is 256.
contentType : string, optional
Specifies the format of the data sent from Event Stream Processing
to Listener, either JSON or plaintext (comma-delimited).
The default is JSON.
ingestDelim : string, optional
Specifies the character that delimits data rows in a multi-row
message from Event Stream Processing to the Listener Ingest
REST API. The delimiter must not be a JSON punctuation character.
The default is a tilde (~).
snapshot : boolean, optional
Specifies whether to send snapshot data.
Returns
-------
:class:`TeradataListenerSubscriber`
'''
connector_key = dict(cls='tdlistener', type='subscribe')
property_defs = dict(
ingestUrl=prop('ingestUrl', dtype='string', required=True),
SSLCert=prop('SSLCert', dtype='string', required=True),
sourceKey=prop('sourceKey', dtype='string', required=True),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
ingestBlocksize=prop('ingestBlocksize', dtype='int'),
contentType=prop('contentType', dtype='string'),
ingestDelim=prop('ingestDelim', dtype='string')
)
def __init__(self, ingestUrl, SSLCert, sourceKey, name=None,
is_active=None, snapshot=None, ingestBlocksize=None,
contentType=None, ingestDelim=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tdlistener', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['ingestUrl',
'SSLCert',
'sourceKey'],
delete='type')
return cls(req[0], req[1], req[2], name=name, is_active=is_active, **properties)
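# Usage sketch (URL, certificate path, and key are hypothetical; not part
# of the original module):
#
#   sub = TeradataListenerSubscriber('https://listener.example.com/api/v1',
#                                    '/etc/ssl/certs/listener.pem',
#                                    'source-secret-key',
#                                    contentType='JSON')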
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/connectors/teradata.py
| 0.845879 | 0.420183 |
teradata.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import re
import six
import warnings
import xml.etree.ElementTree as ET
from .keyword import keywordify
from ..base import ESPObject
def _cast_attrs(attrs):
out = {}
for name, value in (attrs or {}).items():
if value is None:
continue
elif value is True:
value = 'true'
elif value is False:
value = 'false'
else:
value = '%s' % value
out[name.replace('_', '-')] = value
return out
def new_elem(elem_name, attrib=None, text_content=None, **kwargs):
'''
Create a new element
Parameters
----------
elem_name : string
The tag name of the element
attrib : dict, optional
The attributes to set
text_content : string, optional
The text content of the new element
**kwargs : keyword arguments, optional
Additional attributes as keyword arguments
Returns
-------
:class:`ElementTree.Element`
'''
attrib = _cast_attrs(attrib)
kwargs = _cast_attrs(kwargs)
out = ET.Element(elem_name, attrib=attrib, **kwargs)
if text_content is not None:
out.text = '%s' % text_content
return out
def add_elem(parent_elem, child_elem, attrib=None,
text_content=None, **kwargs):
'''
Add a new element to the specified parent element
Parameters
----------
parent_elem : ElementTree.Element
The parent element
child_elem : string
The name of an element or an XML fragment
attrib : dict, optional
The attributes to set
text_content : string, optional
The text content of the new element
**kwargs : keyword arguments, optional
Additional attributes as keyword arguments
Returns
-------
:class:`ElementTree.Element`
'''
attrib = _cast_attrs(attrib)
kwargs = _cast_attrs(kwargs)
# child_elem is an Element
if isinstance(child_elem, ET.Element):
out = child_elem
if attrib:
for key, value in attrib.items():
out.set(key, value)
for key, value in kwargs.items():
out.set(key, value)
parent_elem.append(out)
# child_elem is an XML fragment
elif re.match(r'^\s*<', child_elem):
out = ET.fromstring(child_elem)
if attrib:
for key, value in attrib.items():
out.set(key, value)
for key, value in kwargs.items():
out.set(key, value)
parent_elem.append(out)
# child_elem is an element name
else:
out = ET.SubElement(parent_elem, child_elem, attrib=attrib, **kwargs)
if text_content is not None:
out.text = '%s' % text_content
return out
def add_properties(elem, *args, **kwargs):
'''
Add a ``properties`` node to the given element
Parameters
----------
elem : ElementTree.Element
The element to add properties to
verbatim : boolean, optional
Should property names be used verbatim?
*args : two-element-tuples, optional
Passed to dict constructor as properties
**kwargs : keyword arguments, optional
Passed to dict constructor as properties
Returns
-------
:class:`ElementTree.Element`
'''
verbatim = kwargs.pop('verbatim', False)
bool_as_int = kwargs.pop('bool_as_int', False)
props = add_elem(elem, 'properties')
for key, value in sorted(dict(*args, **kwargs).items()):
if value is None:
continue
if isinstance(value, (list, tuple, set)):
value = ','.join(value)
elif value is True:
if bool_as_int:
value = '1'
else:
value = 'true'
elif value is False:
if bool_as_int:
value = '0'
else:
value = 'false'
add_elem(props, 'property',
dict(name=keywordify(key)), text_content=value)
return props
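# Illustrative sketch (not in the original source; the element and
# property names are arbitrary examples): building a small connector
# element with these helpers.
#
#   conn = new_elem('connector', attrib={'class': 'fs'}, type='publish')
#   add_properties(conn, type='pub', fstype='csv', fsname='input.csv')
#   print(to_xml(conn, pretty=True))
#
# Note that booleans are written as 'true'/'false' (or '1'/'0' when
# bool_as_int=True) and list values are joined with commas.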
def xml_indent(elem, level=0):
'''
Add whitespace to XML for pretty-printing
Parameters
----------
elem : ElementTree.Element
The element to modify with whitespace
level : int, optional
The level of indent
Returns
-------
``None``
'''
i = '\n' + (level * ' ')
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
xml_indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def from_xml(data):
'''
Convert XML to ElementTree.Element
Parameters
----------
data : string
The XML to parse
Returns
-------
:class:`ElementTree.Element`
'''
try:
return ET.fromstring(data)
except:
for i, line in enumerate(data.split('\n')):
print(i+1, line)
raise
def to_xml(elem, encoding=None, pretty=False):
'''
Export element to XML
Parameters
----------
elem : ElementTree.Element or xml-string
The element to export
encoding : string, optional
The output encoding
Returns
-------
string
'''
if isinstance(elem, six.string_types):
elem = ET.fromstring(elem)
# In-place editing!!
if pretty:
xml_indent(elem)
if encoding is None:
return ET.tostring(elem, encoding='utf-8').decode()
return ET.tostring(elem, encoding=encoding)
def get_attrs(obj, extra=[], exclude=[]):
'''
Retrieve XML attributes from object
If ``obj`` has an ``xml_map`` dictionary attribute, it indicates
the object attr to xml attr mapping.
class MyObject(object):
xml_map = dict(object_attr='xml_attr',
same_name_object_attr='same_name_object_attr')
Parameters
----------
obj : object
The object to get attributes from
Returns
-------
dict
'''
if isinstance(exclude, six.string_types):
exclude = [exclude]
out = obj._get_attributes()
if isinstance(extra, six.string_types):
extra = [extra]
if extra:
for item in extra:
out[item] = getattr(obj, item)
if exclude:
for item in exclude:
out.pop(item, None)
return {k: '%s' % v for k, v in out.items() if v is not None}
def ensure_element(data):
'''
Ensure the given object is an ElementTree.Element
Parameters
----------
data : string or Element
Returns
-------
:class:`ElementTree.Element`
'''
if isinstance(data, six.string_types):
return from_xml(data)
return data
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/utils/xml.py
| 0.819026 | 0.160759 |
xml.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import re
import sys
_AUTHINFO_PATHS = [
'_authinfo.gpg',
'.authinfo.gpg',
'_netrc.gpg',
'.netrc.gpg',
'_authinfo',
'.authinfo',
'_netrc',
'.netrc',
]
if not sys.platform.lower().startswith('win'):
_AUTHINFO_PATHS = [aipath for aipath in _AUTHINFO_PATHS if not aipath.startswith('_')]
_ALIASES = {
'machine': 'host',
'login': 'user',
'account': 'user',
'port': 'protocol',
}
def _chunker(seq, size):
''' Read sequence `seq` in `size` sized chunks '''
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def _matches(params, **kwargs):
''' See if keyword arguments are a subset of `params` '''
for key, value in kwargs.items():
if value is None:
continue
if key not in params:
continue
if params.get(key) != value:
return False
return True
def parseparams(param):
'''
Parse the next parameter from the string
Parameters
----------
param : string
The string to parse
Returns
-------
dict
Key/value pairs parsed from the string
'''
out = {}
if not param:
return out
siter = iter(param)
name = []
for char in siter:
if not char.strip():
break
name.append(char)
value = []
for char in siter:
if not char.strip():
break
if char == '"':
for subchar in siter:
if subchar == '\\':
value.append(next(siter))
elif subchar == '"':
break
else:
value.append(char)
name = ''.join(name)
value = ''.join(value)
out[_ALIASES.get(name, name)] = value
out.update(parseparams((''.join(list(siter))).strip()))
return out
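# Parsing sketch (not in the original source): whitespace-delimited
# name/value pairs, with quoted values and _ALIASES remapping applied.
#
#   parseparams('login bob password "p w" port 8080')
#   # => {'user': 'bob', 'password': 'p w', 'protocol': '8080'}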
def query_authinfo(host, user=None, protocol=None, path=None):
'''
Look for a matching host definition in authinfo/netrc files
Parameters
----------
host : string
The host name or IP address to match
user : string, optional
The username to match
protocol : string or int, optional
The protocol or port to match
path : string or list of strings, optional
The paths to look for instead of the automatically detected paths
Returns
-------
dict
Connection information
'''
paths = []
# Construct list of authinfo/netrc paths
if path is None:
        if os.environ.get('AUTHINFO'):
            paths = [os.path.expanduser(x)
                     for x in os.environ.get('AUTHINFO').split(os.pathsep)]
        elif os.environ.get('NETRC'):
            paths = [os.path.expanduser(x)
                     for x in os.environ.get('NETRC').split(os.pathsep)]
else:
home = os.path.expanduser('~')
for item in _AUTHINFO_PATHS:
paths.append(os.path.join(home, item))
elif not isinstance(path, (tuple, list, set)):
paths = [os.path.expanduser(path)]
else:
paths = [os.path.expanduser(x) for x in path]
# Parse each file
for path in paths:
if not os.path.exists(path):
continue
# Remove comments and macros
lines = []
try:
with open(path) as info:
infoiter = iter(info)
for line in infoiter:
line = line.strip()
# Bypass comments
if line.startswith('#'):
continue
# Bypass macro definitions
if line.startswith('macdef'):
for line in infoiter:
if not line.strip():
break
continue
lines.append(line)
except OSError:
continue
line = ' '.join(lines)
# Parse out definitions and look for matches
defs = [x for x in re.split(r'\b(host|machine|default)\b\s*', line) if x.strip()]
for name, value in _chunker(defs, 2):
if name in ['host', 'machine']:
hostname, value = re.split(r'\s+', value, 1)
out = parseparams(value)
out['host'] = hostname.lower()
if _matches(out, host=host.lower(), user=user, protocol=protocol):
return out
else:
out = parseparams(value)
if _matches(out, user=user, protocol=protocol):
return out
return {}
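# Usage sketch (host and user are hypothetical; not part of the original
# file). Entries are read from ~/.authinfo, ~/.netrc (optionally
# .gpg-suffixed), or the paths named in the AUTHINFO/NETRC variables.
#
#   info = query_authinfo('esp.example.com', user='alice')
#   # => e.g. {'host': 'esp.example.com', 'user': 'alice', 'password': '...'}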
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/utils/authinfo.py
| 0.506836 | 0.15925 |
authinfo.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import copy
import re
import six
def _is_compound_key(key, types=six.string_types + (six.text_type, six.binary_type)):
'''
Check for a compound key name
Parameters
----------
key : string
The key name to check
types : list of types, optional
The types of object to check
Returns
-------
True
If the key is compound (i.e., contains a '.')
False
If the key is not compound
'''
return isinstance(key, types) and '.' in key
class xdict(dict):
'''
Nested dictionary that allows setting of nested keys using '.' delimited strings
Keys with a '.' in them automatically get split into separate keys.
Each '.' in a key represents another level of nesting in the resulting
dictionary.
Parameters
----------
*args, **kwargs : Arbitrary arguments and keyword arguments
Same arguments as `dict`
Returns
-------
xdict object
Examples
--------
>>> dct = xdict()
    >>> dct['a.b.c'] = 100
    >>> dct
    {'a': {'b': {'c': 100}}}
'''
def __init__(self, *args, **kwargs):
super(xdict, self).__init__()
self.update(*args, **kwargs)
def __dir__(self):
if hasattr(self, '_dir') and self._dir:
return list(self._dir)
return super(xdict, self).__dir__()
def set_dir_values(self, values):
'''
Set the valid values for keys to display in tab-completion
Parameters
----------
values : iterable
The values to display
'''
super(xdict, self).__setattr__('_dir', values)
def set_doc(self, docstring):
''' Set the docstring for the xdict '''
super(xdict, self).__setattr__('__doc__', docstring)
def __copy__(self):
return type(self)(**self)
def __deepcopy__(self, memo):
out = type(self)()
for key, value in six.iteritems(self):
if isinstance(value, (dict, list, tuple, set)):
value = copy.deepcopy(value)
out[key] = value
return out
@classmethod
def from_json(cls, jsonstr):
'''
Create an xdict object from a JSON string
Parameters
----------
jsonstr : string
Valid JSON string that represents an object
Returns
-------
xdict object
'''
import json
out = cls()
out.update(json.loads(jsonstr))
return out
def __setitem__(self, key, value):
''' Set a key/value pair in an xdict object '''
if isinstance(value, dict) and not isinstance(value, type(self)):
value = type(self)(value)
if _is_compound_key(key):
return self._xset(key, value)
return super(xdict, self).__setitem__(key, value)
def _xset(self, key, value):
'''
Set a key/value pair allowing nested levels in the key
Parameters
----------
key : any
Key value, if it is a string delimited by periods (.), each
period represents another level of nesting of xdict objects.
value : any
Data value
Returns
-------
None
'''
if isinstance(value, dict) and not isinstance(value, type(self)):
value = type(self)(value)
if _is_compound_key(key):
current, key = key.split('.', 1)
if current not in self:
self[current] = type(self)()
return self[current]._xset(key, value)
self[key] = value
def setdefault(self, key, *default):
''' Return keyed value, or set it to `default` if missing '''
if _is_compound_key(key):
try:
return self[key]
except KeyError:
if default:
default = default[0]
if isinstance(default, dict) and not isinstance(default, type(self)):
default = type(self)(default)
else:
default = None
self[key] = default
return default
return super(xdict, self).setdefault(key, *default)
def __contains__(self, key):
''' Does the xdict contain `key`? '''
if super(xdict, self).__contains__(key):
return True
return key in self.allkeys()
has_key = __contains__
def __getitem__(self, key):
''' Get value stored at `key` '''
if _is_compound_key(key):
return self._xget(key)
return super(xdict, self).__getitem__(key)
def _xget(self, key, *default):
'''
Return keyed value, or `default` if missing
Parameters
----------
key : any
Key to look up
*default : any
Default value to return if key is missing
Returns
-------
any
'''
if _is_compound_key(key):
current, key = key.split('.', 1)
try:
return self[current]._xget(key)
except KeyError:
if default:
return default[0]
raise KeyError(key)
return self[key]
def get(self, key, *default):
''' Return keyed value, or `default` if missing '''
if _is_compound_key(key):
return self._xget(key, *default)
return super(xdict, self).get(key, *default)
def __delitem__(self, key):
''' Deleted keyed item '''
if _is_compound_key(key):
return self._xdel(key)
super(xdict, self).__delitem__(key)
def _xdel(self, key):
'''
Delete keyed item
Parameters
----------
key : any
Key to delete. If it is a string that is period (.) delimited,
each period represents another level of nesting of xdict objects.
Returns
-------
None
'''
if _is_compound_key(key):
current, key = key.split('.', 1)
try:
return self[current]._xdel(key)
except KeyError:
raise KeyError(key)
del self[key]
def pop(self, key, *default):
''' Remove and return value stored at `key` '''
try:
out = self[key]
del self[key]
return out
except KeyError:
if default:
return default[0]
raise KeyError(key)
def _flatten(self, dct, output, prefix=''):
'''
Create a new dict with keys flattened to period (.) delimited keys
Parameters
----------
dct : dict
The dictionary to flatten
output : dict
The resulting dictionary (used internally in recursion)
prefix : string
Key prefix built from upper levels of nesting
Returns
-------
dict
'''
if prefix:
prefix = prefix + '.'
for key, value in six.iteritems(dct):
if isinstance(value, dict):
if isinstance(key, int):
intkey = '%s[%s]' % (re.sub(r'\.$', r'', prefix), key)
self._flatten(value, prefix=intkey, output=output)
else:
self._flatten(value, prefix=prefix + key, output=output)
else:
if isinstance(key, int):
intkey = '%s[%s]' % (re.sub(r'\.$', r'', prefix), key)
output[intkey] = value
else:
output[prefix + key] = value
def flattened(self):
''' Return an xdict with keys flattened to period (.) delimited strings '''
output = {}
self._flatten(self, output)
return output
def allkeys(self):
''' Return a list of all possible keys (even sub-keys) in the xdict '''
out = set()
for key in self.flatkeys():
out.add(key)
while '.' in key:
key = key.rsplit('.', 1)[0]
out.add(key)
if '[' in key:
out.add(re.sub(r'\[\d+\]', r'', key))
return list(out)
def flatkeys(self):
''' Return a list of flattened keys in the xdict '''
return list(self.flattened().keys())
def flatvalues(self):
''' Return a list of flattened values in the xdict '''
return list(self.flattened().values())
def flatitems(self):
''' Return tuples of flattened key/value pairs '''
return list(self.flattened().items())
def iterflatkeys(self):
''' Return iterator of flattened keys '''
return six.iterkeys(self.flattened())
def iterflatvalues(self):
''' Return iterator of flattened values '''
return six.itervalues(self.flattened())
def iterflatitems(self):
''' Return iterator of flattened items '''
return six.iteritems(self.flattened())
def viewflatkeys(self):
''' Return view of flattened keys '''
return six.viewkeys(self.flattened())
def viewflatvalues(self):
''' Return view of flattened values '''
return six.viewvalues(self.flattened())
def viewflatitems(self):
''' Return view of flattened items '''
return six.viewitems(self.flattened())
def update(self, *args, **kwargs):
''' Merge the key/value pairs into `self` '''
for arg in args:
if isinstance(arg, dict):
for key, value in six.iteritems(arg):
self._xset(key, value)
else:
for key, value in arg:
self._xset(key, value)
for key, value in six.iteritems(kwargs):
self._xset(key, value)
def to_json(self):
'''
Convert an xdict object to a JSON string
Returns
-------
string
'''
import json
return json.dumps(self)
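# Behavior sketch (not part of the original module): compound keys nest
# on '.', and the flattened views invert that nesting.
#
#   d = xdict()
#   d['a.b.c'] = 100
#   d['a.b.d'] = 200
#   d.flattened()    # => {'a.b.c': 100, 'a.b.d': 200}
#   'a.b' in d       # => True; allkeys() includes intermediate levels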
class xadict(xdict):
'''
An xdict that also allows setting/getting/deleting keys as attributes
'''
getdoc = None
trait_names = None
def _getAttributeNames(self):
''' Block this from creating attributes '''
return
def __delattr__(self, key):
''' Delete the attribute stored at `key` '''
if key.startswith('_') and key.endswith('_'):
return super(xadict, self).__delattr__(key)
del self[key]
def __getattr__(self, key):
''' Get the attribute store at `key` '''
if key.startswith('_') and key.endswith('_'):
return super(xadict, self).__getattr__(key)
try:
return self[key]
except KeyError:
dct = type(self)()
self[key] = dct
return dct
def __getitem__(self, key):
''' Get item of an integer creates a new dict '''
if isinstance(key, int) and key not in self:
out = type(self)()
self[key] = out
return out
return super(xadict, self).__getitem__(key)
def __setattr__(self, key, value):
''' Set the attribute stored at `key` '''
if key.startswith('_') and key.endswith('_'):
return super(xadict, self).__setattr__(key, value)
self[key] = value
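# Attribute-access sketch (not in the original source): missing keys
# create nested xadicts on the fly.
#
#   d = xadict()
#   d.server.host = 'localhost'
#   d['server.port'] = 5570
#   d.server    # => {'host': 'localhost', 'port': 5570}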
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/utils/xdict.py
| 0.891055 | 0.273462 |
xdict.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import contextlib
import os
import re
import six
import types
import weakref
from six.moves.urllib.parse import urlparse
from .xdict import xdict
from ..exceptions import ESPOptionError
# pylint: disable=C0330
# Container for options
_config = xdict()
# Subscribers to option changes
_subscribers = weakref.WeakKeyDictionary()
def iteroptions(*args, **kwargs):
'''
Iterate through name / value pairs of options
Options can come in several forms. They can be consecutive arguments
where the first argument is the name and the following argument is
the value. They can be two-element tuples (or lists) where the first
element is the name and the second element is the value. You can
also pass in a dictionary of key / value pairs. And finally, you can
use keyword arguments.
Parameters
----------
*args : any, optional
See description above.
**kwargs : key / value pairs, optional
Arbitrary keyword arguments.
Returns
-------
generator
Each iteration returns a name / value pair in a tuple
'''
args = list(args)
while args:
item = args.pop(0)
if isinstance(item, (list, tuple)):
yield item[0], item[1]
elif isinstance(item, dict):
for key, value in six.iteritems(item):
yield key, value
else:
yield item, args.pop(0)
for key, value in six.iteritems(kwargs):
yield key, value
def subscribe(func):
'''
Add a subscriber function to option events
Parameters
----------
func : callable
A callable object that takes two parameters: key and value.
This function is called with the name and value of any option
that is set.
Returns
-------
None
'''
if isinstance(func, types.MethodType):
obj = six.get_method_self(func)
func = six.get_method_function(func)
_subscribers[func] = (weakref.ref(func), weakref.ref(obj))
else:
_subscribers[func] = (weakref.ref(func), None)
def unsubscribe(func):
'''
Remove a subscriber from option events
Parameters
----------
func : callable
The callable used to subscribe to option events
Returns
-------
None
'''
_subscribers.pop(func, None)
@contextlib.contextmanager
def option_context(*args, **kwargs):
'''
Create a context for setting option temporarily
Parameters
----------
*args : string / any pairs
Name / value pairs in consecutive arguments (not tuples)
**kwargs : dict
Key / value pairs of options
'''
# Save old state and set new option values
oldstate = {}
for key, value in iteroptions(*args, **kwargs):
key = key.lower()
oldstate[key] = get_option(key)
set_option(key, value)
# Yield control
yield
# Set old state back
for key, value in six.iteritems(oldstate):
set_option(key, value)
def _get_option_leaf_node(key):
'''
Find full option name of given key
Parameters
----------
key : string
Either a partial key or full key name of an option
Returns
-------
string
The full key name of the option
Raises
------
ESPOptionError
If more than one option matches
'''
flatkeys = list(_config.flatkeys())
key = key.lower()
if key in flatkeys:
return key
keys = [k for k in flatkeys if k.endswith('.' + key)]
if len(keys) > 1:
raise ESPOptionError('There is more than one option with the name %s.' % key)
if not keys:
raise ESPOptionError('%s is not a valid option name.' % key)
return keys[0]
def set_option(*args, **kwargs):
'''
Set the value of an option
Parameters
----------
*args : string / any pairs
The name and value of an option in consecutive arguments (not tuples)
**kwargs : dict
Arbitrary keyword / value pairs
'''
for key, value in iteroptions(*args, **kwargs):
key = _get_option_leaf_node(key)
opt = _config[key]
if not isinstance(opt, SWATOption):
raise ESPOptionError('%s is not a valid option name' % key)
opt.set(value)
set_options = set_option
def get_option(key):
'''
Get the value of an option
Parameters
----------
key : string
The name of the option
Returns
-------
any
The value of the option
'''
key = _get_option_leaf_node(key)
opt = _config[key]
if not isinstance(opt, SWATOption):
raise ESPOptionError('%s is not a valid option name' % key)
return opt.get()
def get_suboptions(key):
'''
Get the dictionary of options at the level `key`
Parameters
----------
key : string
The name of the option collection
Returns
-------
dict
The dictionary of options at level `key`
'''
if key not in _config:
raise ESPOptionError('%s is not a valid option name' % key)
opt = _config[key]
if isinstance(opt, SWATOption):
raise ESPOptionError('%s does not have sub-options' % key)
return opt
def get_default(key):
'''
Get the default value of an option
Parameters
----------
key : string
The name of the option
Returns
-------
any
The default value of the option
'''
key = _get_option_leaf_node(key)
opt = _config[key]
if not isinstance(opt, SWATOption):
raise ESPOptionError('%s is not a valid option name' % key)
return opt.get_default()
get_default_val = get_default
def describe_option(*keys, **kwargs):
'''
Print the description of one or more options
Parameters
----------
*keys : one or more strings
Names of the options
'''
_print_desc = kwargs.get('_print_desc', True)
out = []
if not keys:
keys = sorted(_config.flatkeys())
else:
newkeys = []
for k in keys:
try:
newkeys.append(_get_option_leaf_node(k))
except ESPOptionError:
                newkeys.append(k)
        keys = newkeys
    for key in keys:
if key not in _config:
raise ESPOptionError('%s is not a valid option name' % key)
opt = _config[key]
if isinstance(opt, xdict):
desc = describe_option(*['%s.%s' % (key, x)
for x in opt.flatkeys()], _print_desc=_print_desc)
if desc is not None:
out.append(desc)
continue
if _print_desc:
print(opt.__doc__)
print('')
else:
out.append(opt.__doc__)
if not _print_desc:
return '\n'.join(out)
def reset_option(*keys):
'''
Reset one or more options back to their default value
Parameters
----------
*keys : one or more strings
Names of options to reset
'''
if not keys:
keys = sorted(_config.flatkeys())
else:
keys = [_get_option_leaf_node(k) for k in keys]
for key in keys:
if key not in _config:
raise ESPOptionError('%s is not a valid option name' % key)
opt = _config[key]
if not isinstance(opt, SWATOption):
raise ESPOptionError('%s is not a valid option name' % key)
# Reset swat options
set_option(key, get_default(key))
def check_int(value, minimum=None, maximum=None, exclusive_minimum=False,
exclusive_maximum=False, multiple_of=None):
'''
Validate an integer value
Parameters
----------
value : int or float
Value to validate
minimum : int, optional
The minimum value allowed
maximum : int, optional
The maximum value allowed
exclusive_minimum : boolean, optional
Should the minimum value be excluded as an endpoint?
exclusive_maximum : boolean, optional
Should the maximum value be excluded as an endpoint?
multiple_of : int, optional
If specified, the value must be a multple of it in order for
the value to be considered valid.
Returns
-------
int
The validated integer value
'''
try:
out = int(value)
except:
raise ESPOptionError('Could not convert %s to an integer' % value)
if minimum is not None:
if out < minimum:
raise ESPOptionError('%s is smaller than the minimum value of %s' %
(out, minimum))
        if exclusive_minimum and out == minimum:
            raise ESPOptionError('%s is equal to the exclusive minimum value of %s' %
                                 (out, minimum))
if maximum is not None:
if out > maximum:
raise ESPOptionError('%s is larger than the maximum value of %s' %
(out, maximum))
if exclusive_maximum and out == maximum:
raise ESPOptionError('%s is equal to the exclusive maximum value of %s' %
(out, maximum))
if multiple_of is not None and (out % int(multiple_of)) != 0:
raise ESPOptionError('%s is not a multiple of %s' % (out, multiple_of))
return out
def check_float(value, minimum=None, maximum=None, exclusive_minimum=False,
exclusive_maximum=False, multiple_of=None):
'''
Validate a floating point value
Parameters
----------
value : int or float
Value to validate
minimum : int or float, optional
The minimum value allowed
maximum : int or float, optional
The maximum value allowed
exclusive_minimum : boolean, optional
Should the minimum value be excluded as an endpoint?
exclusive_maximum : boolean, optional
Should the maximum value be excluded as an endpoint?
multiple_of : int or float, optional
If specified, the value must be a multple of it in order for
the value to be considered valid.
Returns
-------
float
The validated floating point value
'''
try:
out = float(value)
except:
raise ESPOptionError('Could not convert %s to a float' % value)
if minimum is not None:
if out < minimum:
raise ESPOptionError('%s is smaller than the minimum value of %s' %
(out, minimum))
        if exclusive_minimum and out == minimum:
            raise ESPOptionError('%s is equal to the exclusive minimum value of %s' %
                                 (out, minimum))
if maximum is not None:
if out > maximum:
raise ESPOptionError('%s is larger than the maximum value of %s' %
(out, maximum))
if exclusive_maximum and out == maximum:
raise ESPOptionError('%s is equal to the exclusive maximum value of %s' %
(out, maximum))
if multiple_of is not None and (out % int(multiple_of)) != 0:
raise ESPOptionError('%s is not a multiple of %s' % (out, multiple_of))
return out
def check_boolean(value):
'''
Validate a boolean value
Parameters
----------
value : int or boolean
The value to validate. If specified as an integer, it must
be either 0 for False or 1 for True.
Returns
-------
boolean
The validated boolean
'''
if value is False or value is True:
return value
if isinstance(value, int):
if value == 1:
return True
if value == 0:
return False
    raise ESPOptionError('%s is not a boolean or proper integer value' % value)
def check_string(value, pattern=None, max_length=None, min_length=None,
valid_values=None):
'''
Validate a string value
Parameters
----------
value : string
The value to validate
pattern : regular expression string, optional
A regular expression used to validate string values
max_length : int, optional
The maximum length of the string
min_length : int, optional
The minimum length of the string
valid_values : list of strings, optional
List of the only possible values
Returns
-------
string
The validated string value
'''
try:
if isinstance(value, six.text_type):
out = value
elif isinstance(value, six.binary_type):
out = value.decode('utf-8')
else:
out = '%s' % value
except:
raise ESPOptionError('Could not convert string value to unicode')
if max_length is not None and len(out) > max_length:
raise ESPOptionError('%s is longer than the maximum length of %s' %
(out, max_length))
if min_length is not None and len(out) < min_length:
raise ESPOptionError('%s is shorter than the minimum length of %s' %
(out, min_length))
if pattern is not None and not re.search(pattern, out):
raise ESPOptionError('%s does not match pattern %s' % (out, pattern))
if valid_values is not None and out not in valid_values:
raise ESPOptionError('%s is not one of the possible values: %s' %
(out, ', '.join(valid_values)))
return out
def check_url(value, pattern=None, max_length=None, min_length=None, valid_values=None):
'''
Validate a URL value
Parameters
----------
value : any
The value to validate. This value will be cast to a string
and converted to unicode.
pattern : regular expression string, optional
A regular expression used to validate string values
max_length : int, optional
The maximum length of the string
min_length : int, optional
The minimum length of the string
valid_values : list of strings, optional
List of the only possible values
Returns
-------
string
The validated URL value
'''
out = check_string(value, pattern=pattern, max_length=max_length,
min_length=min_length, valid_values=valid_values)
try:
urlparse(out)
except:
raise ESPOptionError('%s is not a valid URL' % value)
return out
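# Validation sketch (not in the original source):
#
#   check_int(10, minimum=0, maximum=64, multiple_of=2)    # => 10
#   check_string('json', valid_values=['json', 'xml'])     # => 'json'
#   check_boolean(1)                                       # => True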
class SWATOption(object):
'''
SWAT configuration option
Parameters
----------
name : string
The name of the option
typedesc : string
Description of the option data type (e.g., int, float, string)
validator : callable
A callable object that validates the option value and returns
the validated value.
default : any
The default value of the option
doc : string
The documentation string for the option
environ : string, optional
If specified, the value should be specified in an environment
variable of that name.
Returns
-------
SWATOption object
'''
def __init__(self, name, typedesc, validator, default, doc, environ=None):
self._name = name
self._typedesc = typedesc
self._validator = validator
if environ is not None:
self._default = validator(os.environ.get(environ, default))
else:
self._default = validator(default)
self._environ = environ
self._value = self._default
self._doc = doc
@property
def __doc__(self):
''' Documentation string '''
separator = ' '
if isinstance(self._value, six.string_types) and len(self._value) > 40:
separator = '\n '
return '''%s : %s\n %s\n [default: %s]%s[currently: %s]\n''' % \
(self._name, self._typedesc, self._doc.rstrip().replace('\n', '\n '),
self._default, separator, self._value)
def set(self, value):
'''
Set the value of the option
Parameters
----------
value : any
The value to set
Returns
-------
None
'''
value = self._validator(value)
_config[self._name]._value = value
if self._environ is not None:
os.environ[self._environ] = str(value)
for func, obj in list(_subscribers.values()):
if func is not None:
if obj is None:
func = func()
if func is not None:
func(self._name, value)
else:
func, obj = func(), obj()
if func is not None and obj is not None:
func(obj, self._name, value)
def get(self):
'''
Get the value of the option
Returns
-------
any
The value of the option
'''
if self._environ is not None:
try:
_config[self._name]._value = self._validator(os.environ[self._environ])
except KeyError:
pass
return _config[self._name]._value
def get_default(self):
'''
Get the default value of the option
Returns
-------
any
The default value of the option
'''
return _config[self._name]._default
def register_option(key, typedesc, validator, default, doc, environ=None):
'''
Register a new option
Parameters
----------
key : string
The name of the option
typedesc : string
Description of option data type (e.g., int, float, string)
validator : callable
A callable object that validates the value and returns
a validated value.
default : any
The default value of the option
doc : string
The documentation string for the option
environ : string, optional
If specified, the value should be specified in an environment
variable of that name.
Returns
-------
None
'''
_config[key] = SWATOption(key, typedesc, validator, default, doc, environ=environ)
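# Registration sketch (hypothetical option name; not part of the original
# module): register_option wires a validator, default, and docstring into
# the shared _config container.
#
#   register_option('debug.requests', 'boolean', check_boolean, False,
#                   'Print each REST request as it is made.')
#   set_option('debug.requests', True)
#   get_option('debug.requests')    # => True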
class AttrOption(object):
'''
Attribute-style access of SWAT options
'''
def __init__(self, name):
object.__setattr__(self, '_name', name)
def __dir__(self):
if self._name in _config:
return _config[self._name].flatkeys()
return _config.flatkeys()
@property
def __doc__(self):
if self._name:
return describe_option(self._name, _print_desc=False)
return describe_option(_print_desc=False)
def __getattr__(self, name):
name = name.lower()
if self._name:
fullname = self._name + '.' + name
else:
fullname = name
if fullname not in _config:
fullname = _get_option_leaf_node(fullname)
out = _config[fullname]
if not isinstance(out, SWATOption):
return type(self)(fullname)
return out.get()
def __setattr__(self, name, value):
name = name.lower()
if self._name:
fullname = self._name + '.' + name
else:
fullname = name
if fullname not in _config:
fullname = _get_option_leaf_node(fullname)
out = _config[fullname]
if not isinstance(out, SWATOption):
return type(self)(fullname)
_config[fullname].set(value)
return
def __call__(self, *args, **kwargs):
''' Shortcut for option context '''
return option_context(*args, **kwargs)
# Object for setting and getting options using attribute syntax
options = AttrOption(None)
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/utils/config.py
| 0.771069 | 0.211213 |
config.py
|
pypi
|
from __future__ import print_function, division, absolute_import, unicode_literals
import base64
import csv
import datetime
import decimal
import json
import numpy as np
import os
import pandas as pd
import re
import six
import sys
import xml.etree.ElementTree as ET
from six.moves import urllib
from ..base import ESPObject
from ..config import get_option
EPOCH = datetime.datetime(1970, 1, 1)
def str_to_float(value):
''' Convert value to float '''
if isinstance(value, six.string_types):
if value.strip() == '' or 'nan' in value.lower():
return np.nan
return np.float64(value)
def str_to_int32(value):
    ''' Convert value to int32 '''
if isinstance(value, six.string_types):
if value.strip() == '' or 'nan' in value.lower():
return np.nan
return np.int32(value)
def str_to_int64(value):
    ''' Convert value to int64 '''
if isinstance(value, six.string_types):
if value.strip() == '' or 'nan' in value.lower():
return np.nan
return np.int64(value)
def double_array(value):
''' Convert value to array of doubles '''
if isinstance(value, six.string_types):
if re.match(r'^\s*\[', value):
            out = [str_to_float(x)
                   for x in re.sub(r'[\[\]\s]+', r'', value).split(';')]
else:
out = [str_to_float(value)]
else:
out = [np.float64(value)]
return np.array(out, dtype=np.float64)
def int32_array(value):
''' Convert value to array of int32s '''
if isinstance(value, six.string_types):
if re.match(r'^\s*\[', value):
            out = [str_to_int32(x)
                   for x in re.sub(r'[\[\]\s]+', r'', value).split(';')]
else:
out = [str_to_int32(value)]
else:
out = [np.int32(value)]
return np.array(out, dtype=np.int32)
def int64_array(value):
    ''' Convert value to array of int64s '''
if isinstance(value, six.string_types):
if re.match(r'^\s*\[', value):
            out = [str_to_int64(x)
                   for x in re.sub(r'[\[\]\s]+', r'', value).split(';')]
else:
out = [str_to_int64(value)]
else:
out = [np.int64(value)]
return np.array(out, dtype=np.int64)
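# Parsing sketch (not part of the original source): ESP array fields
# arrive as '[v1;v2;...]' strings, which these helpers convert to numpy
# arrays.
#
#   double_array('[1.5; 2.5; 3.5]')    # => array([1.5, 2.5, 3.5])
#   int32_array('7')                   # => array([7], dtype=int32)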
ESP2PY_MAP = {
'date': lambda x: EPOCH + datetime.timedelta(seconds=int(x)),
'stamp': lambda x: EPOCH + datetime.timedelta(microseconds=int(x)),
'double': str_to_float,
'int64': lambda x: np.int64(decimal.Decimal(x)),
'int32': lambda x: np.int32(decimal.Decimal(x)),
'money': decimal.Decimal,
'blob': base64.b64decode,
'string': lambda x: hasattr(x, 'decode') and x.decode('utf-8') or x,
'array(dbl)': double_array,
'array(double)': double_array,
'array(i32)': int32_array,
'array(int32)': int32_array,
'array(i64)': int64_array,
'array(int64)': int64_array,
}
ESP2DF_TYPEMAP = {
'date': datetime.datetime.now(),
'stamp': datetime.datetime.now(),
'double': np.float64(0),
'int64': np.int64(0),
'int32': np.int32(0),
'money': decimal.Decimal(0),
'blob': b'bytes',
'string': u'string',
'array(dbl)': pd.Series(dtype=np.float64),
'array(double)': pd.Series(dtype=np.float64),
'array(i32)': pd.Series(dtype=np.int32),
'array(int32)': pd.Series(dtype=np.int32),
'array(i64)': pd.Series(dtype=np.int64),
'array(int64)': pd.Series(dtype=np.int64),
}
def get_dataframe(obj):
'''
Get an empty DataFrame that represents the Window schema
Parameters
----------
    obj : Schema or Window
        The schema to use, or a window whose schema will be used
Returns
-------
:class:`pandas.DataFrame`
'''
try:
BaseWindow, Schema # noqa: F821
    except NameError:
from ..schema import Schema
from ..windows import BaseWindow
if isinstance(obj, Schema):
schema = obj
elif isinstance(obj, BaseWindow) and obj.schema.fields:
schema = obj.schema
else:
schema = get_schema(obj, obj.fullname)
columns = []
row = []
index = []
int32s = []
int32_arrays = []
for field in schema.fields.values():
columns.append(field.name)
row.append(ESP2DF_TYPEMAP[field.type])
if field.type == 'int32':
int32s.append(field.name)
        elif field.type in ['array(int32)', 'array(i32)']:
            int32_arrays.append(field.name)
if field.key:
index.append(field.name)
out = pd.DataFrame(columns=columns, data=[row])
for name in int32s:
out[name] = out[name].astype('int32')
for name in int32_arrays:
out[name] = out[name].apply(lambda x: x.astype('int32'),
convert_dtype=False)
if index:
out = out.set_index(index)
return out.iloc[0:0]
def get_schema(obj, window):
''' Retrieve the schema for the specified window '''
try:
get_window_class # noqa: F821
    except NameError:
from ..windows import get_window_class
if isinstance(window, six.string_types):
path = window.replace('.', '/')
else:
if getattr(window, 'schema') and window.schema.fields:
return window.schema.copy(deep=True)
path = window.fullname.replace('.', '/')
res = obj._get(urllib.parse.urljoin(obj.base_url, 'windows/%s' % path),
params=dict(schema='true'))
for item in res.findall('./*'):
try:
wcls = get_window_class(item.tag)
except KeyError:
raise TypeError('Unknown window type: %s' % item.tag)
return wcls.from_xml(item, session=obj.session).schema
def get_events(obj, data, format='xml', separator=None, single=False, server_info=None):
'''
Convert events to DataFrames
Parameters
----------
obj : ESPObject
The calling object. If this is a Schema, that schema is used
        for the events. If it is a Window, the schema for that window
is used for the events.
data : xml-string or ElementTree.Element
The events to process
format : string, optional
The format of the events
separator : string, optional
The separator between each 'properties' events
single : bool, optional
Only return a single DataFrame rather than a dictionary.
If there is more than one DataFrame, raise an exception.
server_info : dict, optional
Information about the server, for version-specific behaviors
Returns
-------
dict of :class:`pandas.DataFrame`
If single == False
:class:`pandas.DataFrame`
If single == True
'''
try:
BaseWindow, Schema # noqa: F821
    except NameError:
from ..schema import Schema
from ..windows import BaseWindow
server_info = server_info or {}
if get_option('debug.events'):
sys.stderr.write('%s\n' % data)
if format.lower() == 'csv':
return get_csv_events(obj, data)
if format.lower() == 'json':
return get_json_events(obj, data)
if format.lower() == 'properties':
try:
return get_properties_events(obj, data, separator)
except:
import traceback
traceback.print_exc()
raise
if isinstance(data, six.string_types):
data = ET.fromstring(data)
windows = dict()
from . import xml
for event in data.findall('./event'):
wname = event.attrib.get('window', '')
if wname not in windows:
current = windows[wname] = dict(transformers={}, columns=[],
index=[], events=[], dtypes=[])
if isinstance(obj, Schema):
schema = obj
elif isinstance(obj, BaseWindow) and obj.fullname == wname.replace('/', '.') and obj.schema.fields:
schema = obj.schema
elif not wname:
if isinstance(obj, BaseWindow):
schema = get_schema(obj, obj.fullname)
else:
raise ValueError('Could not determine window schema')
else:
schema = get_schema(obj, wname)
for field in schema.fields.values():
current['transformers'][field.name] = ESP2PY_MAP.get(field.type,
lambda x: x)
current['columns'].append(field.name)
current['dtypes'].append(field.type)
if field.key:
current['index'].append(field.name)
else:
current = windows[wname]
row = dict()
for item in event.findall('./*'):
row[item.tag] = current['transformers'].get(item.tag, lambda x: x)(item.text)
current['events'].append(row)
out = dict()
for wname, window in windows.items():
orig_wname = wname
wname = wname.replace('/', '.')
out[wname] = pd.DataFrame(window['events'])
columns = [x for x in window['columns'] if x in out[wname].columns]
out[wname] = out[wname][columns]
for colname, dtype in zip(windows[orig_wname]['columns'],
windows[orig_wname]['dtypes']):
if dtype == 'int32':
out[wname][colname] = out[wname][colname].astype('int32', copy=False)
if window['index']:
index = [x for x in window['index'] if x in out[wname].columns]
if index:
out[wname] = out[wname].set_index(index)
if single:
if len(out) == 1:
return list(out.values())[0]
elif not out:
return get_dataframe(obj)
raise ValueError('Output contains more than one value: %s' % out)
return out
def get_csv_events(obj, data):
'''
Convert CSV events to DataFrames
Parameters
----------
obj : ESPObject
The calling object. If this is a Schema, that schema is used
        for the events. If it is a Window, the schema for that window
is used for the events.
data : csv-string
The events to process
Returns
-------
:class:`pandas.DataFrame`
'''
try:
BaseWindow, Schema # noqa: F821
    except NameError:
from ..schema import Schema
from ..windows import BaseWindow
if isinstance(obj, Schema):
schema = obj
elif isinstance(obj, BaseWindow):
if obj.schema.fields:
schema = obj.schema
else:
schema = get_schema(obj, obj)
else:
raise ValueError('Can not obtain window schema from given object')
transformers = []
columns = []
index = []
dtypes = []
for fname, field in schema.fields.items():
transformers.append(ESP2PY_MAP.get(field.type, lambda x: x))
columns.append(fname)
dtypes.append(field.type)
if field.key:
index.append(fname)
rows = []
for row in csv.reader(data.rstrip().split('\n')):
row = list(row)[2:]
for i, item in enumerate(row):
row[i] = transformers[i](item)
rows.append(row)
out = pd.DataFrame(data=rows, columns=columns)
out = out.set_index(index)
for colname, dtype in zip(columns, dtypes):
if dtype == 'int32':
out[colname] = out[colname].astype('int32', copy=False)
return out
def get_json_events(obj, data):
'''
Convert JSON events to DataFrames
Parameters
----------
obj : ESPObject
The calling object. If this is a Schema, that schema is used
        for the events. If it is a Window, the schema for that window
is used for the events.
data : json-string
The events to process
Returns
-------
:class:`pandas.DataFrame`
'''
try:
BaseWindow, Schema # noqa: F821
except:
from ..schema import Schema
from ..windows import BaseWindow
if isinstance(obj, Schema):
schema = obj
elif isinstance(obj, BaseWindow):
if obj.schema.fields:
schema = obj.schema
else:
schema = get_schema(obj, obj)
else:
raise ValueError('Can not obtain window schema from given object')
transformers = {}
columns = []
index = []
dtypes = []
for fname, field in schema.fields.items():
transformers[fname] = ESP2PY_MAP.get(field.type, lambda x: x)
columns.append(fname)
dtypes.append(field.type)
if field.key:
index.append(fname)
rows = []
for event in json.loads(data)['events']:
event = event['event']
row = []
for col in columns:
row.append(transformers[col](event[col]))
rows.append(row)
out = pd.DataFrame(data=rows, columns=columns)
out = out.set_index(index)
for colname, dtype in zip(columns, dtypes):
if dtype == 'int32':
out[colname] = out[colname].astype('int32', copy=False)
return out
def get_properties_events(obj, data, separator=None):
'''
Convert properties events to DataFrames
Parameters
----------
obj : ESPObject
The calling object. If this is a Schema, that schema is used
        for the events. If it is a Window, the schema for that window
is used for the events.
    data : properties-string
        The events to process, as separator-delimited name=value records
Returns
-------
:class:`pandas.DataFrame`
'''
try:
BaseWindow, Schema # noqa: F821
    except NameError:
from ..schema import Schema
from ..windows import BaseWindow
if separator is None:
separator = '\n\n'
if isinstance(obj, Schema):
schema = obj
elif isinstance(obj, BaseWindow):
if obj.schema.fields:
schema = obj.schema
else:
schema = get_schema(obj, obj)
else:
raise ValueError('Can not obtain window schema from given object: %s' % obj)
transformers = {}
columns = []
index = []
for fname, field in schema.fields.items():
transformers[fname] = ESP2PY_MAP.get(field.type, lambda x: x)
columns.append(fname)
if field.key:
index.append(fname)
rows = []
for event in [x for x in data.split(separator) if x.strip()]:
row = []
for i, col in enumerate(x for x in event.split('\n') if x.strip()):
if i == 0 and col.startswith('opcode='):
continue
col, value = col.split('=', 1)
row.append(transformers[col](value))
rows.append(row)
out = pd.DataFrame(data=rows, columns=columns)
out = out.set_index(index)
return out
|
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/utils/events.py
| 0.566258 | 0.317453 |
events.py
|
pypi
|
import functools
from inspect import signature, Parameter, Signature
from .split_types import Broadcast
class Mut(object):
""" Marker that marks values in an annotation as mutable. """
__slots__ = [ "value" ]
def __init__(self, value):
self.value = value
# Constructor for mutables.
mut = lambda x: Mut(x)
class Annotation(object):
""" An annotation on a function.
Annotations map arguments (by index for regular arguments and by name for
keyword arguments) to their split type.
"""
__slots__ = [ "mutables", "arg_types", "return_type", "kwarg_types" ]
def __init__(self, func, types, kwtypes, return_type):
""" Initialize an annotation for a function invocation with the given
arguments.
Parameters
__________
func : the function that was invoked.
types : the split types of the non-keyword arguments and return type.
kwtypes : the split types of the keyword arguments.
"""
try:
sig = signature(func)
args = [(name, param) for (name, param) in sig.parameters.items()\
if param.kind == Parameter.POSITIONAL_OR_KEYWORD]
num_required_types = 0
for (name, param) in args:
if param.default is Parameter.empty:
num_required_types += 1
            if len(types) != num_required_types:
                raise ValueError("invalid number of arguments in annotation (expected {}, got {})".format(num_required_types, len(types)))
# Make sure there's no extraneous args.
kwargs = set([name for (name, param) in args if param.default is not Parameter.empty])
for name in kwargs:
if name not in kwtypes:
kwtypes[name] = Broadcast()
for name in kwtypes:
assert(name in kwargs)
        except ValueError:
            # Signature inspection failed; skip verification of the
            # annotation against the function's parameters.
            pass
# The mutable values. These are indices for positionals and string
# names for keyword args.
self.mutables = set()
# The argument types.
self.arg_types = []
for (i, ty) in enumerate(types):
if isinstance(ty, Mut):
self.arg_types.append(ty.value)
self.mutables.add(i)
else:
self.arg_types.append(ty)
# The return type. This can be None if the function doesn't return anything.
self.return_type = return_type
# Dictionary of kwarg types.
self.kwarg_types = dict()
for (key, value) in kwtypes.items():
if isinstance(value, Mut):
self.kwarg_types[key] = value.value
self.mutables.add(key)
else:
self.kwarg_types[key] = value
def types(self):
""" Iterate over the split types in this annotation. """
for ty in self.arg_types:
yield ty
for ty in self.kwarg_types.values():
yield ty
yield self.return_type
def __str__(self):
        args = ", ".join([str(t) for t in self.arg_types])
        if len(self.kwarg_types) > 0:
            if args:
                args += ", "
            args += ", ".join(["{}={}".format(k, v)
                               for (k, v) in self.kwarg_types.items()])
        return "({}) -> {}".format(args, self.return_type)
|
/sas-0.0.1-py3-none-any.whl/pycomposer/annotation.py
| 0.61555 | 0.276257 |
annotation.py
|
pypi
|
from abc import ABC, abstractmethod
import copy
class SplitTypeError(TypeError):
""" Custom type error for when annotation types cannot be propagated.
Used for debugging so an exception may be raised during a pipeline break.
"""
pass
class SplitType(ABC):
"""The base split type class.
Other types should subclass this to define custom split types for a
library.
"""
def __init__(self):
"""Initialize a new split type."""
pass
def __hash__(self):
return hash(str(self))
def elements(self, value):
""" Returns the number of elements that this value will emit.
This function should return `None` if the splitter will emit elements
indefinitely.
The default implementation calls `len` on value. If this is not
suitable, the split type should override this method.
"""
return len(value)
@abstractmethod
def combine(self, values):
"""Combine a list of values into a single merged value."""
pass
@abstractmethod
def split(self, obj):
"""Returns disjoint split objects based on obj.
split can return any iterable object, but will preferably return a
generator that lazily yields split values from the source object.
"""
pass
def __eq__(self, other):
""" Check whether two types are equal.
Note that two split types are equal if (a) they have the same class
name and (b) their attributes dictionary is equal.
"""
if other is None:
return False
        return self.__dict__ == other.__dict__ and\
            type(self).__name__ == type(other).__name__
def __ne__(self, other):
""" Check whether two types are not equal. """
if other is None:
return True
return self.__dict__ != other.__dict__ or\
type(self).__name__ != type(other).__name__
def _sync_check_equal(self, other):
""" Checks whether two types are equal and raises a SplitTypeError if
they are not.
Returns False otherwise (this function is only used in _sync).
"""
if self != other:
raise SplitTypeError("could not sync types {} and {}".format(self, other))
else:
return False
def _sync(self, other):
""" Enforce that two types are the same and returns whether anything
changed.
If the other type is a generic, this type is propagated to the generic
type. Implementators of split types should not need to override this
function.
"""
if not isinstance(other, GenericType):
return self._sync_check_equal(other)
if other._concrete is None:
other._concrete = copy.deepcopy(self)
return True
return self._sync_check_equal(other._concrete)
def _finalized(self):
""" Returns the finalized type of this type. """
return self
class GenericType(SplitType):
"""A generic type that can be substituted with any other type.
Generic types are the only ones that do not have an associated split and
combine implementation. Instead, these types are placeholders that are
replaced with a concrete type before execution.
"""
def __init__(self, name):
"""Creates a new generic named type with a given name.
The given name is local to single annotation. Names across annotations
do not have any meaning, but if two generic types in the same
annotation have the same name, then they will be assigned the same
concrete split type.
"""
# Name of the generic (e.g., "A").
self.name = name
# Used for type inference to distinguish generics with the same name
# but different concrete types.
        self._id = None
        # The concrete type assigned to this generic type. After type
        # inference, the generic type is replaced with this.
        self._concrete = None
def _sync(self, other):
""" Several cases to handle here:
1. Generic(ConcreteLeft), Other
assert ConcreteLeft == Other
2. Generic(None), Other
self = Other
3. Generic(ConcreteLeft), Generic(ConcreteRight)
assert ConcreteLeft == ConcreteRight
4. Generic(ConcreteLeft), Generic(None)
other == ConcreteLeft
5. Generic(None), Generic(ConcreteRight)
self = ConcreteRight
6. Generic(None), Generic(None)
< no op >
"""
if not isinstance(other, GenericType):
if self._concrete is not None:
# Case 1
return self._concrete._sync_check_equal(other)
else:
# Case 2
self._concrete = copy.deepcopy(other)
return True
if self._concrete is not None:
if other._concrete is not None:
# Case 3
return self._concrete._sync_check_equal(other._concrete)
else:
# Case 4
other._concrete = copy.deepcopy(self._concrete)
return True
else:
if other._concrete is not None:
# Case 5
self._concrete = copy.deepcopy(other._concrete)
return True
else:
# Case 6 (nothing happens)
return False
def _finalized(self):
""" Converts non-finalized types into finalized types. """
assert self._concrete is not None
return self._concrete
def __str__(self):
suffix = ""
if self._id is not None:
suffix += "<{}>".format(self._id)
if self._concrete is not None:
suffix += "({})".format(self._concrete)
return str(self.name) + suffix
def combine(self, _):
raise ValueError("Combiner called on generic split type")
def split(self, _):
raise ValueError("Split called on generic split type")
class Broadcast(SplitType):
""" A split type that broadcasts values. """
def __init__(self):
pass
def combine(self, values):
if len(values) > 0:
return values[0]
def split(self, _start, _end, value):
return value
def elements(self, _):
return None
def __str__(self): return "broadcast"
# Convenience functions for creating named generics.
A = lambda: GenericType("A")
B = lambda: GenericType("B")
C = lambda: GenericType("C")
D = lambda: GenericType("D")
E = lambda: GenericType("E")
F = lambda: GenericType("F")
G = lambda: GenericType("G")
H = lambda: GenericType("H")
I = lambda: GenericType("I")
J = lambda: GenericType("J")
K = lambda: GenericType("K")
L = lambda: GenericType("L")
M = lambda: GenericType("M")
N = lambda: GenericType("N")
O = lambda: GenericType("O")
P = lambda: GenericType("P")
Q = lambda: GenericType("Q")
R = lambda: GenericType("R")
S = lambda: GenericType("S")
T = lambda: GenericType("T")
U = lambda: GenericType("U")
V = lambda: GenericType("V")
W = lambda: GenericType("V")
X = lambda: GenericType("X")
Y = lambda: GenericType("Y")
Z = lambda: GenericType("Z")
|
/sas-0.0.1-py3-none-any.whl/pycomposer/split_types.py
| 0.892237 | 0.543833 |
split_types.py
|
pypi
|
from abc import ABC, abstractmethod
import types
from .driver import STOP_ITERATION
class Instruction(ABC):
"""
An instruction that updates an operation in a lazy DAG.
"""
@abstractmethod
def evaluate(self, thread, start, end, values, context):
"""
Evaluates an instruction.
Parameters
----------
thread : the thread that is currently executing
start : the start index of the current split value.
end : the end index of the current split value
values : a global value map holding the inputs.
context : map holding execution state (arg ID -> value).
"""
pass
class Split(Instruction):
"""
An instruction that splits the inputs to an operation.
"""
def __init__(self, target, ty):
"""
A Split instruction takes an argument and split type and applies
the splitter on the argument.
Parameters
----------
target : the arg ID that will be split.
ty : the split type.
"""
self.target = target
self.ty = ty
self.splitter = None
def __str__(self):
return "v{} = split {}:{}".format(self.target, self.target, self.ty)
def evaluate(self, thread, start, end, values, context):
""" Returns values from the split. """
if self.splitter is None:
# First time - check if the splitter is actually a generator.
result = self.ty.split(start, end, values[self.target])
if isinstance(result, types.GeneratorType):
self.splitter = result
result = next(self.splitter)
else:
self.splitter = self.ty.split
else:
if isinstance(self.splitter, types.GeneratorType):
result = next(self.splitter)
else:
result = self.splitter(start, end, values[self.target])
if isinstance(result, str) and result == STOP_ITERATION:
return STOP_ITERATION
else:
context[self.target].append(result)
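# Sketch of the splitter contract (hypothetical split type, not part of this
# module): a split() implemented as a generator is stored on first use and
# advanced with next() on each subsequent evaluate() call, so pieces are
# produced lazily:
#
#   class ChunkType:
#       def split(self, start, end, value):
#           yield value[start:end]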
class Call(Instruction):
""" An instruction that calls an SA-enabled function. """
def __init__(self, target, func, args, kwargs, ty):
self.target = target
# Function to call.
self.func = func
# Arguments: list of targets.
self.args = args
# Keyword arguments: Maps { name -> target }
self.kwargs = kwargs
# Return split type.
self.ty = ty
def __str__(self):
args = ", ".join(map(lambda a: "v" + str(a), self.args))
kwargs = list(map(lambda v: "{}=v{}".format(v[0], v[1]), self.kwargs.items()))
arguments = ", ".join([args] + kwargs)
return "v{} = call {}({}):{}".format(self.target, self.func.__name__, arguments, str(self.ty))
def get_args(self, context):
return [ context[target][-1] for target in self.args ]
def get_kwargs(self, context):
return dict([ (name, context[target][-1]) for (name, target) in self.kwargs.items() ])
def evaluate(self, _thread, _start, _end, _values, context):
"""
Evaluates a function call by gathering arguments and calling the
function.
"""
args = self.get_args(context)
kwargs = self.get_kwargs(context)
context[self.target].append(self.func(*args, **kwargs))
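# Minimal wiring sketch (hypothetical arg IDs, values, and split type): a
# Split pushes a piece into the context, then a Call consumes it as an
# argument:
#
#   values = {0: [1, 2, 3, 4]}
#   context = {0: [], 1: []}
#   Split(0, SomeSplitType()).evaluate(None, 0, 2, values, context)
#   Call(1, sum, [0], {}, None).evaluate(None, 0, 2, values, context)
#   context[1][-1]  # result of sum() over the first piece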
|
/sas-0.0.1-py3-none-any.whl/pycomposer/vm/instruction.py
| 0.794783 | 0.50177 |
instruction.py
|
pypi
|
from .driver import STOP_ITERATION
from .instruction import Split
class Program:
"""
A Composer Virtual Machine Program.
A program stores a sequence of instructions to execute.
"""
__slots__ = ["ssa_counter", "insts", "registered", "index"]
def __init__(self):
# Counter for registering instructions.
self.ssa_counter = 0
# Instruction list.
self.insts = []
# Registered values. Maps SSA value to real value.
self.registered = {}
def get(self, value):
"""
Get the SSA value for a value, or None if the value is not registered.
value : The value to lookup
"""
for num, val in self.registered.items():
if value is val:
return num
def set_range_end(self, range_end):
for inst in self.insts:
if isinstance(inst, Split):
inst.ty.range_end = range_end
def step(self, thread, piece_start, piece_end, values, context):
"""
Step the program and return whether there are still items to process.
"""
for task in self.insts:
result = task.evaluate(thread, piece_start, piece_end, values, context)
if isinstance(result, str) and result == STOP_ITERATION:
return False
return True
def elements(self, values):
"""Returns the number of elements that this program will process.
This quantity is retrieved by querying the Split instructions in the program.
"""
elements = None
for inst in self.insts:
if isinstance(inst, Split):
e = inst.ty.elements(values[inst.target])
if e is None:
continue
if elements is not None:
assert elements == e, str(inst)
else:
elements = e
return elements
def __str__(self):
return "\n".join([str(i) for i in self.insts])
|
/sas-0.0.1-py3-none-any.whl/pycomposer/vm/program.py
| 0.742795 | 0.398699 |
program.py
|
pypi
|
from pathlib import Path
from typing import Union
from rich.console import Console
from sas7bdat_converter import dir_to_csv as converter_dir_to_csv
from sas7bdat_converter import dir_to_excel as converter_dir_to_excel
from sas7bdat_converter import dir_to_json as converter_dir_to_json
from sas7bdat_converter import dir_to_parquet as converter_dir_to_parquet
from sas7bdat_converter import dir_to_xml as converter_dir_to_xml
from sas7bdat_converter import to_csv as converter_to_csv
from sas7bdat_converter import to_excel as converter_to_excel
from sas7bdat_converter import to_json as converter_to_json
from sas7bdat_converter import to_parquet as converter_to_parquet
from sas7bdat_converter import to_xml as converter_to_xml
from typer import Argument, Exit, Option, Typer, echo
__version__ = "1.0.0"
app = Typer()
console = Console()
@app.command()
def to_csv(
file_path: Path = Argument(..., help="Path to the file to convert", show_default=False),
export_file: Path = Argument(..., help="Path to the new csv file", show_default=False),
) -> None:
"""Convert a sas7bdat or xpt file to a csv file."""
with console.status("Converting file..."):
if file_path.suffix != ".sas7bdat" and file_path.suffix != ".xpt":
exit("File must be either a sas7bdat file or a xpt file")
if export_file.suffix != ".csv":
exit("The export file must be a csv file")
converter_to_csv(sas7bdat_file=file_path, export_file=export_file)
@app.command()
def dir_to_csv(
dir: Path = Argument(
..., help="Path to the directory to convert", exists=True, show_default=False
),
output_dir: Union[Path, None] = Option(
None,
"--output-dir",
"-o",
help="Path to the directory to save the output files. Default = The same directory as dir",
show_default=False,
),
continue_on_error: bool = Option(
False,
"--continue-on-error",
"-c",
help="If set conversion will continue after failures",
),
verbose: bool = Option(
False, "--verbose", "-v", help="If set the amount of information printed is increased."
),
) -> None:
"""Convert a directory containing sas7bdat or xpt files to csv files."""
with console.status("Converting files..."):
export_path = output_dir or dir
converter_dir_to_csv(
dir_path=dir,
export_path=export_path,
continue_on_error=continue_on_error,
verbose=verbose,
)
@app.command()
def to_excel(
file_path: Path = Argument(..., help="Path to the file to convert", show_default=False),
export_file: Path = Argument(..., help="Path to the new Excel file", show_default=False),
) -> None:
"""Convert a sas7bdat or xpt file to a xlsx file."""
with console.status("Converting file..."):
if file_path.suffix != ".sas7bdat" and file_path.suffix != ".xpt":
exit("File must be either a sas7bdat file or a xpt file")
if export_file.suffix != ".xlsx":
exit("The export file must be a xlsx file")
converter_to_excel(sas7bdat_file=file_path, export_file=export_file)
@app.command()
def dir_to_excel(
dir: Path = Argument(
..., help="Path to the directory to convert", exists=True, show_default=False
),
output_dir: Union[Path, None] = Option(
None,
"--output-dir",
"-o",
help="Path to the directory to save the output files. Default = The same directory as dir",
show_default=False,
),
continue_on_error: bool = Option(
False,
"--continue-on-error",
"-c",
help="If set conversion will continue after failures",
),
verbose: bool = Option(
False, "--verbose", "-v", help="If set the amount of information printed is increased."
),
) -> None:
"""Convert a directory of sas7bdat or xpt files to xlsx files."""
with console.status("Converting files..."):
export_path = output_dir or dir
converter_dir_to_excel(
dir_path=dir,
export_path=export_path,
continue_on_error=continue_on_error,
verbose=verbose,
)
@app.command()
def to_json(
file_path: Path = Argument(..., help="Path to the file to convert", show_default=False),
export_file: Path = Argument(..., help="Path to the new JSON file", show_default=False),
) -> None:
"""Convert a sas7bdat or xpt file to a JSON file."""
with console.status("Converting file..."):
if file_path.suffix != ".sas7bdat" and file_path.suffix != ".xpt":
exit("File must be either a sas7bdat file or a xpt file")
if export_file.suffix != ".json":
exit("The export file must be a json file")
converter_to_json(sas7bdat_file=file_path, export_file=export_file)
@app.command()
def dir_to_json(
dir: Path = Argument(
..., help="Path to the directory to convert", exists=True, show_default=False
),
output_dir: Union[Path, None] = Option(
None,
"--output-dir",
"-o",
help="Path to the directory to save the output files. Default = The same directory as dir",
show_default=False,
),
continue_on_error: bool = Option(
False,
"--continue-on-error",
"-c",
help="If set conversion will continue after failures",
),
verbose: bool = Option(
False, "--verbose", "-v", help="If set the amount of information printed is increased."
),
) -> None:
"""Convert a directory of sas7bdat or xpt files to json files."""
with console.status("Converting files..."):
export_path = output_dir or dir
converter_dir_to_json(
dir_path=dir,
export_path=export_path,
continue_on_error=continue_on_error,
verbose=verbose,
)
@app.command()
def to_parquet(
file_path: Path = Argument(..., help="Path to the file to convert", show_default=False),
export_file: Path = Argument(..., help="Path to the new parquet file", show_default=False),
) -> None:
"""Convert a sas7bdat or xpt file to a parquet file."""
with console.status("Converting file..."):
if file_path.suffix != ".sas7bdat" and file_path.suffix != ".xpt":
exit("File must be either a sas7bdat file or a xpt file")
if export_file.suffix != ".parquet":
exit("The export file must be a parquet file")
converter_to_parquet(sas7bdat_file=file_path, export_file=export_file)
@app.command()
def dir_to_parquet(
dir: Path = Argument(
..., help="Path to the directory to convert", exists=True, show_default=False
),
output_dir: Union[Path, None] = Option(
None,
"--output-dir",
"-o",
help="Path to the directory to save the output files. Default = The same directory as dir",
show_default=False,
),
continue_on_error: bool = Option(
False,
"--continue-on-error",
"-c",
help="If set conversion will continue after failures",
),
verbose: bool = Option(
False, "--verbose", "-v", help="If set the amount of information printed is increased."
),
) -> None:
"""Convert a directory of sas7bdat or xpt files to parquet files."""
with console.status("Converting files..."):
export_path = output_dir or dir
converter_dir_to_parquet(
dir_path=dir,
export_path=export_path,
continue_on_error=continue_on_error,
verbose=verbose,
)
@app.command()
def to_xml(
file_path: Path = Argument(..., help="Path to the file to convert", show_default=False),
export_file: Path = Argument(..., help="Path to the new XML file", show_default=False),
) -> None:
"""Convert a sas7bdat or xpt file to a xml file."""
with console.status("Converting file..."):
if file_path.suffix != ".sas7bdat" and file_path.suffix != ".xpt":
exit("File must be either a sas7bdat file or a xpt file")
if export_file.suffix != ".xml":
exit("The export file must be a XML file")
converter_to_xml(sas7bdat_file=file_path, export_file=export_file)
@app.command()
def dir_to_xml(
dir: Path = Argument(
..., help="Path to the directory to convert", exists=True, show_default=False
),
output_dir: Union[Path, None] = Option(
None,
"--output-dir",
"-o",
help="Path to the directory to save the output files. Default = The same directory as dir",
show_default=False,
),
continue_on_error: bool = Option(
False,
"--continue-on-error",
"-c",
help="If set conversion will continue after failures",
),
verbose: bool = Option(
False, "--verbose", "-v", help="If set the amount of information printed is increased."
),
) -> None:
"""Convert a directory of sas7bdat or xpt files to xml files."""
with console.status("Converting files..."):
export_path = output_dir or dir
converter_dir_to_xml(
dir_path=dir,
export_path=export_path,
continue_on_error=continue_on_error,
verbose=verbose,
)
@app.callback(invoke_without_command=True)
def main(
version: Union[bool, None] = Option(
None,
"--version",
"-v",
is_eager=True,
help="Show the installed version",
),
) -> None:
if version:
echo(__version__)
raise Exit()
if __name__ == "__main__":
app()
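# Example invocations (illustrative; assumes the package installs an entry
# point named `sas7bdat-converter-cli` for this Typer app):
#
#   sas7bdat-converter-cli to-csv data.sas7bdat data.csv
#   sas7bdat-converter-cli dir-to-parquet ./sas_files -o ./parquet_files -c -v
#   sas7bdat-converter-cli --version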
|
/sas7bdat_converter_cli-1.0.0-py3-none-any.whl/sas7bdat_converter_cli/main.py
| 0.682574 | 0.259685 |
main.py
|
pypi
|
from __future__ import annotations
import csv
from pathlib import Path
from xml.sax.saxutils import escape
import numpy as np
import pandas as pd
_FILE_DICT_REQUIRED_KEYS = [
"sas7bdat_file",
"export_file",
]
def batch_to_csv(
file_dicts: list[dict[str, str | Path]],
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts a batch of sas7bdat and/or xpt files to csv files.
Args:
file_dicts: A list of dictionaries containing the files to convert. The dictionary should
contain the keys 'sas7bdat_file' (containing the path and name to the sas7bdat
file) and 'export_file' containing the path and name of the export csv).
Example: file_dict = [{
'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.csv',
},
{
'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.csv',
}]
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
for file_dict in file_dicts:
_rise_on_invalid_file_dict(file_dict)
sas7bdat = _format_path(file_dict["sas7bdat_file"])
export = _format_path(file_dict["export_file"])
try:
to_csv(sas7bdat_file=sas7bdat, export_file=export)
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat}") # noqa: T201
elif continue_on_error:
pass
else:
raise
def batch_to_parquet(
file_dicts: list[dict[str, str | Path]],
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts a batch of sas7bdat and/or xpt files to parquet files.
Args:
file_dicts: A list of dictionaries containing the files to convert. The dictionary should
contain the keys 'sas7bdat_file' (containing the path and name to the sas7bdat
file) and 'export_file' containing the path and name of the export parquet).
Example: file_dict = [{
'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.parquet',
},
{
'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.parquet',
}]
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
for file_dict in file_dicts:
_rise_on_invalid_file_dict(file_dict)
sas7bdat = _format_path(file_dict["sas7bdat_file"])
export = _format_path(file_dict["export_file"])
try:
to_parquet(sas7bdat_file=sas7bdat, export_file=export)
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat}") # noqa: T201
elif continue_on_error:
pass
else:
raise
def batch_to_excel(
file_dicts: list[dict[str, str | Path]],
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts a batch of sas7bdat and/or xpt files to xlsx files.
Args:
file_dicts: A list of dictionaries containing the files to convert. The dictionary should
contain the keys 'sas7bdat_file' (containing the path and name to the sas7bdat
file) and 'export_file' containing the path and name of the export xlsx).
Example: file_dict = [{
'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.xlsx',
},
{
'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.xlsx',
}]
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
for file_dict in file_dicts:
_rise_on_invalid_file_dict(file_dict)
sas7bdat = _format_path(file_dict["sas7bdat_file"])
export = _format_path(file_dict["export_file"])
try:
to_excel(sas7bdat_file=sas7bdat, export_file=export)
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat}") # noqa: T201
elif continue_on_error:
pass
else:
raise
def batch_to_json(
file_dicts: list[dict[str, str | Path]],
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts a batch of sas7bdat and/or xpt files to json files.
Args:
file_dicts: A list of dictionaries containing the files to convert. The dictionary should
contain the keys 'sas7bdat_file' (containing the path and name to the sas7bdat
file) and 'export_file' containing the path and name of the export json).
Example: file_dict = [{
'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.json',
},
{
'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.json',
}]
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
for file_dict in file_dicts:
_rise_on_invalid_file_dict(file_dict)
sas7bdat = _format_path(file_dict["sas7bdat_file"])
export = _format_path(file_dict["export_file"])
try:
to_json(sas7bdat_file=sas7bdat, export_file=export)
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat}") # noqa: T201
elif continue_on_error:
pass
else:
raise
def batch_to_xml(
file_dicts: list[dict[str, str | Path]],
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts a batch of sas7bdat and/or xpt files to xml files.
Args:
file_dicts: A list of dictionaries containing the files to convert. The dictionary should
contain the keys 'sas7bdat_file' (containing the path and name to the sas7bdat
file) and 'export_file' containing the path and name of the export xml).
Optionally the dictionary can also contain 'root_node' (containing the name for
the root node in the xml file), and 'first_node' (containing the name for the
first node in the xml file).
Examples: file_dict = [{'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.xml'},
{'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.xml'}]
file_dict = [{'sas7bdat_file': 'sas_file1.sas7bdat',
'export_file': 'converted_file1.xml',
'root_node': 'my_root',
'first_node': 'my_first'},
{'sas7bdat_file': 'sas_file2.sas7bdat',
'export_file': 'converted_file2.xml',
'root_node': 'another_root',
'first_node': 'another_first'}]
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
optional_keys = [
"root_node",
"first_node",
]
for file_dict in file_dicts:
missing_required = not set(_FILE_DICT_REQUIRED_KEYS).issubset(file_dict)
unknown_keys = set(file_dict) - set(_FILE_DICT_REQUIRED_KEYS) - set(optional_keys)
if missing_required or unknown_keys:
message = _invalid_key_exception_message(
required_keys=_FILE_DICT_REQUIRED_KEYS, optional_keys=optional_keys
)
raise KeyError(message)
sas7bdat = _format_path(file_dict["sas7bdat_file"])
export = _format_path(file_dict["export_file"])
root_node = None
first_node = None
if "root_node" in file_dict:
root_node = file_dict["root_node"]
if "first_node" in file_dict:
first_node = file_dict["first_node"]
try:
if root_node and first_node:
to_xml(
sas7bdat_file=sas7bdat,
export_file=export,
root_node=str(root_node),
first_node=str(first_node),
)
elif root_node:
to_xml(sas7bdat_file=sas7bdat, export_file=export, root_node=str(root_node))
elif first_node:
to_xml(sas7bdat_file=sas7bdat, export_file=export, first_node=str(first_node))
else:
to_xml(sas7bdat_file=sas7bdat, export_file=export)
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat}") # noqa: T201
elif continue_on_error:
pass
else:
raise
def dir_to_csv(
dir_path: str | Path,
export_path: str | Path | None = None,
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts all sas7bdat and/or xpt files in a directory into csv files.
args:
dir_path: The path to the directory that contains the sas7bdat files
for conversion.
export_path (optional): If used this can specify a new directory to create
the converted files into. If not supplied then the files will be
created into the same directory as dir_path. Default = None
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
_walk_dir("csv", dir_path, export_path, continue_on_error, verbose)
def dir_to_excel(
dir_path: str | Path,
export_path: str | Path | None = None,
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts all sas7bdat and/or xpt files in a directory into xlsx files.
args:
dir_path: The path to the directory that contains the sas7bdat files
for conversion.
export_path (optional): If used this can specify a new directory to create
the converted files into. If not supplied then the files will be
created into the same directory as dir_path. Default = None
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
_walk_dir("xlsx", dir_path, export_path, continue_on_error, verbose)
def dir_to_json(
dir_path: str | Path,
export_path: str | Path | None = None,
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts all sas7bdat and/or xpt files in a directory into json files.
args:
dir_path: The path to the directory that contains the sas7bdat files
for conversion.
export_path (optional): If used this can specify a new directory to create
the converted files into. If not supplied then the files will be
created into the same directory as dir_path. Default = None
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
_walk_dir("json", dir_path, export_path, continue_on_error, verbose)
def dir_to_parquet(
dir_path: str | Path,
export_path: str | Path | None = None,
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts all sas7bdat and/or xpt files in a directory into a parquet files.
args:
dir_path: The path to the directory that contains the sas7bdat files
for conversion.
export_path (optional): If used this can specify a new directory to create
the converted files into. If not supplied then the files will be
created into the same directory as dir_path. Default = None
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
_walk_dir("parquet", dir_path, export_path, continue_on_error, verbose)
def dir_to_xml(
dir_path: str | Path,
export_path: str | Path | None = None,
*,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
"""Converts all sas7bdat and/or xpt files in a directory into xml files.
args:
dir_path: The path to the directory that contains the sas7bdat files
for conversion.
export_path (optional): If used this can specify a new directory to create
the converted files into. If not supplied then the files will be
created into the same directory as dir_path. Default = None
continue_on_error: If set to true processing of files in a batch will continue if there is
a file conversion error instead of raising an exception. Default = False
verbose: Increases the output. Default = True
"""
_walk_dir("xml", dir_path, export_path, continue_on_error, verbose)
def to_csv(sas7bdat_file: str | Path, export_file: str | Path) -> None:
"""Converts a sas7bdat and/or xpt file into a csv file.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
export_file: The name, including the path, for the export file.
"""
valid_extensions = (".csv",)
file_extension = Path(export_file).suffix
if not _is_valid_extension(valid_extensions, file_extension):
error_message = _file_extension_exception_message("to_csv", valid_extensions)
raise AttributeError(error_message)
df = to_dataframe(sas7bdat_file)
df.to_csv(export_file, quoting=csv.QUOTE_NONNUMERIC, index=False)
def to_parquet(sas7bdat_file: str | Path, export_file: str | Path) -> None:
"""Converts a sas7bdat and/or xpt file into a parquet file.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
export_file: The name, including the path, for the export file.
"""
valid_extensions = (".parquet",)
file_extension = Path(export_file).suffix
if not _is_valid_extension(valid_extensions, file_extension):
error_message = _file_extension_exception_message("to_parquet", valid_extensions)
raise AttributeError(error_message)
df = to_dataframe(sas7bdat_file)
try:
df.to_parquet(export_file, index=False)
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The pyarrow extra is required in order to convert a parquet file"
)
def to_dataframe(sas7bdat_file: str | Path) -> pd.DataFrame:
"""Converts a sas7bdat and/or xpt file into a pandas dataframe.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
return:
A pandas dataframe containing the data from the sas7bdat file.
"""
df = pd.read_sas(sas7bdat_file)
# convert binary strings to utf-8
str_df = df.select_dtypes([str(np.dtype(object))])
if len(str_df.columns) > 0:
str_df = str_df.stack().str.decode("utf-8").unstack()
for col in str_df:
df[col] = str_df[col]
# end conversion to utf-8
return df
def to_excel(sas7bdat_file: str | Path, export_file: str | Path) -> None:
"""Converts a sas7bdat and/or xpt file into a xlsx file.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
export_file: The name, including the path, for the export file.
"""
valid_extensions = (".xlsx",)
file_extension = Path(export_file).suffix
if not _is_valid_extension(valid_extensions, file_extension):
error_message = _file_extension_exception_message("to_excel", valid_extensions)
raise AttributeError(error_message)
df = to_dataframe(sas7bdat_file)
try:
df.to_excel(export_file, index=False)
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The optional dependency openpyxl is required in order to convert to an Excel file"
)
def to_json(sas7bdat_file: str | Path, export_file: str | Path) -> None:
"""Converts a sas7bdat and/or xpt file into a json file.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
export_file: The name, including the path, for the export file.
"""
valid_extensions = (".json",)
file_extension = Path(export_file).suffix
if not _is_valid_extension(valid_extensions, file_extension):
error_message = _file_extension_exception_message("to_json", valid_extensions)
raise AttributeError(error_message)
df = to_dataframe(sas7bdat_file)
df.to_json(export_file)
def to_xml(
sas7bdat_file: str | Path,
export_file: str | Path,
root_node: str = "root",
first_node: str = "item",
) -> None:
"""Converts a sas7bdat and/or xpt file into a xml file.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
export_file: The name, including the path, for the export file.
root_node: The name to use for the root node in the xml file.
first_node: The name to use for the first node in the xml file.
"""
valid_extensions = (".xml",)
file_extension = Path(export_file).suffix
if not _is_valid_extension(valid_extensions, file_extension):
error_message = _file_extension_exception_message("to_xml", valid_extensions)
raise AttributeError(error_message)
df = to_dataframe(sas7bdat_file)
def row_to_xml(row: pd.DataFrame) -> str:
xml = [f" <{first_node}>"]
for i, col_name in enumerate(row.index):
text = row.iloc[i]
if isinstance(text, str):
text = escape(text)
xml.append(f" <{col_name}>{text}</{col_name}>")
xml.append(f" </{first_node}>")
return "\n".join(xml)
res = f'<?xml version="1.0" encoding="UTF-8"?>\n<{root_node}>\n'
res = res + "\n".join(df.apply(row_to_xml, axis=1)) + f"\n</{root_node}>"
with open(export_file, "w") as f:
f.write(res)
def _file_extension_exception_message(conversion_type: str, valid_extensions: tuple[str]) -> str:
if len(valid_extensions) == 1:
is_are = ("extension", "is")
else:
is_are = ("extensions", "are")
extensions = ", ".join(valid_extensions)
return f"sas7bdat conversion error - Valid {is_are[0]} for {conversion_type} conversion {is_are[1]}: {extensions}" # noqa: E501
def _invalid_key_exception_message(
required_keys: list[str], optional_keys: list[str] | None = None
) -> str:
required_keys_joined: str = ", ".join(required_keys)
if optional_keys:
optional_keys_joined: str = ", ".join(optional_keys)
return f"Invalid key provided, expected keys are: {required_keys_joined} and optional keys are: {optional_keys_joined}"
else:
return f"Invalid key provided, expected keys are: {required_keys_joined}"
def _is_valid_extension(valid_extensions: tuple[str], file_extension: str) -> bool:
return file_extension in valid_extensions
def _format_path(path: str | Path) -> str:
return str(path) if isinstance(path, Path) else path
def _rise_on_invalid_file_dict(file_dict: dict[str, str | Path]) -> None:
if len(set(file_dict).intersection(_FILE_DICT_REQUIRED_KEYS)) != len(_FILE_DICT_REQUIRED_KEYS):
message = _invalid_key_exception_message(required_keys=_FILE_DICT_REQUIRED_KEYS)
raise KeyError(message)
def _walk_dir(
file_type: str,
dir_path: str | Path,
export_path: str | Path | None = None,
continue_on_error: bool = False,
verbose: bool = True,
) -> None:
path = dir_path if isinstance(dir_path, Path) else Path(dir_path)
for file_name in path.iterdir():
if file_name.suffix in [".sas7bdat", ".xpt"]:
export_file = Path(f"{file_name.stem}.{file_type}")
if export_path:
export_file = Path(export_path).joinpath(export_file)
else:
export_file = path.joinpath(export_file)
sas7bdat_file = path.joinpath(file_name)
try:
if file_type == "csv":
to_csv(str(sas7bdat_file), str(export_file))
elif file_type == "json":
to_json(str(sas7bdat_file), str(export_file))
elif file_type == "xlsx":
to_excel(str(sas7bdat_file), str(export_file))
elif file_type == "xml":
to_xml(str(sas7bdat_file), str(export_file))
elif file_type == "parquet":
to_parquet(str(sas7bdat_file), str(export_file))
except: # noqa: E722
if continue_on_error and verbose:
print(f"Error converting {sas7bdat_file}") # noqa: T201
elif continue_on_error:
pass
else:
raise
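# Usage sketch for the public API (file paths are illustrative; the top-level
# exports are confirmed by the CLI package above):
#
#   import sas7bdat_converter as sc
#   sc.to_csv("example.sas7bdat", "example.csv")
#   sc.dir_to_csv("./sas_files", "./csv_files", continue_on_error=True)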
|
/sas7bdat_converter-2.0.0-py3-none-any.whl/sas7bdat_converter/converter.py
| 0.695131 | 0.366561 |
converter.py
|
pypi
|
import subprocess
import sqlite3
from scipy.io import loadmat
import re
import numpy as np
import os
import random
import pickle
class Crawler:
"""This Class allows to load S-matrices from a target directory. Find the
nessecary name/ID in the 'meta_materials.db'. You need to be able to execute
Bash commands. For example usage look below in the 'if name == main' section.
# Arguments
directory: str, path to directory containing the .mat/.npy files
cursor: sqlite3 cursor
"""
def __init__(self, directory, cursor = None):
self.directory = directory
self.cursor = cursor
self.files = os.listdir(self.directory)
def find_path(self, name):
bashCommand = 'find {} -name *{}_Daten_gesamt.mat -print -quit'.format(self.directory, name)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
path = output[:-1].decode('UTF-8')
return path
def find_smat(self, name, adress=None):
path = self.find_path(name)
smat = loadmat(path)['SMAT_']
smat = np.squeeze(smat)
if adress is None:
return smat
else:
wav_length_dim = smat.shape[-3]
adress += [slice(wav_length_dim), slice(4), slice(4)]
if len(smat.shape) != len(adress):
raise ValueError(
'ERROR: S-Mat {} has unexpected shape: {}'.format(name, smat.shape))
return smat[tuple(adress)]
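# Illustrative adress usage: for a .mat array holding a parameter sweep, an
# adress like [2, 0] selects smat[2, 0, :, :4, :4], i.e. a single Lx4x4
# S-matrix out of the sweep grid.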
def find_smat_by_id(self, id):
query = 'SELECT m_file, adress FROM simulations WHERE simulation_id = {}'.format(id)
self.cursor.execute(query)
row = self.cursor.fetchone()
name = row[0]
adress = row[1]
if type(adress) is str:
adress = eval(adress,{"__builtins__":None})
return self.find_smat(name, adress)
def load_smat_npy(self, name, adress=None):
smat = np.load("{}/{}{}.npy".format(self.directory, name, adress))
return smat
def load_smat_by_id_npy(self, id):
query = 'SELECT m_file, adress FROM simulations WHERE simulation_id = {}'.format(id)
self.cursor.execute(query)
row = self.cursor.fetchone()
smat = self.load_smat_npy(name=row[0], adress=row[1])
return smat
def load_random_smat_npy(self):
"""
Loads a random smat from a directory of .npy files
self.directory has to point to a .npy directory
# Returns
smat: LX4X4 Array
"""
file = random.choice(self.files)
smat = np.load("{}/{}".format(self.directory, file))
return smat
def extract_all(self, target_dir):
"""
CAREFUL: This copies files to the target_dir.
For every distinct m_file name in meta_materials.db this method looks
for 'm_file*Daten_gesamt.mat' in self.directory and copies it to target_dir.
"""
self.cursor.execute('select m_file from simulations')
names = [name[0] for name in self.cursor.fetchall()]
names = set(names)
for m_file in names:
path = self.find_path(m_file)
bashCommand = 'cp {} {}'.format(path, target_dir)
print(bashCommand)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
def extract_params(self, id):
"""Queries meta_materials.db for all the parameters to the given ID.
# Arguments
id: int
# Returns
param_dict: dict, contains the combined data from the simulations and
geometry tables with corresponding names
"""
#query for the simulation_data of id
query = 'SELECT * FROM simulations WHERE simulation_id = {}'.format(id)
self.cursor.execute(query)
simulation_data = list(self.cursor.fetchall()[0])
#query for the names to simulation_data
query = """SELECT sql FROM sqlite_master
WHERE tbl_name = 'simulations' AND type = 'table'"""
self.cursor.execute(query)
simulation_names = self.cursor.fetchone()[0]
#parse out the names using RegEx
pattern = re.compile(r'\n\s+([a-z_]+)')
matches = pattern.finditer(simulation_names)
simulation_names = [match.group(1) for match in matches]
#join names and data into a dict
simulation_dict = dict(zip(simulation_names, simulation_data))
#repeat the process for the geometry-table
geo = simulation_dict['geometry']
query = 'SELECT * FROM {} WHERE simulation_id = {}'.format(geo, id)
self.cursor.execute(query)
geo_data = self.cursor.fetchall()[0]
query = "SELECT sql FROM sqlite_master WHERE tbl_name = '{}' AND type = 'table'".format(geo)
self.cursor.execute(query)
geo_names = self.cursor.fetchone()[0]
pattern = re.compile(r'\n\s+([a-z_]+)')
matches = pattern.finditer(geo_names)
geo_names = [match.group(1) for match in matches]
geo_dict = dict(zip(geo_names, geo_data))
del geo_dict['simulation_id']
#join the two dicts
param_dict = {**simulation_dict, **geo_dict}
return param_dict
def check_db_for_correct_dimensions(self):
working = 0
total = 0
self.cursor.execute('SELECT simulation_id FROM simulations WHERE angle_of_incidence=0 AND geometry="square"')
ids = [id[0] for id in self.cursor.fetchall()]
for id in ids:
total += 1
print('checking ID: ', id)
try:
#load smat and parameters
smat = self.find_smat_by_id(id)
param_dict = self.extract_params(id)
#extract relevant parameters
L = param_dict['spectral_points']
assert smat.shape == (L, 4, 4)
except Exception as e:
print("couldn't load smat:")
print(e)
continue
working += 1
print('{} out of {} entries working'.format(working, total))
def convert_to_npy(self, ids):
"""
Loads the .mat files for all the IDs, splits them into one file per ID
and saves them as .npy for quicker access
Also extracts the parameters of every ID and saves them to a .pickle file
# Arguments
ids: list
"""
#load param_dict
with open("params.pickle", "rb") as f:
param_dict = pickle.load(f)
for id in ids:
print("converting id: ", id)
#save smat
query = 'SELECT m_file, adress FROM simulations WHERE simulation_id = {}'.format(id)
self.cursor.execute(query)
row = self.cursor.fetchone()
name = row[0]
adress = row[1]
if type(adress) is str:
adress = eval(adress,{"__builtins__":None})
fullname = "{}{}.npy".format(name, adress)
smat = self.find_smat(name, adress)
np.save("smat_data/{}".format(fullname), smat)
#write params to dict
params = self.extract_params(id)
param_dict[fullname] = params
#pickle param_dict
with open("params.pickle", "wb") as f:
pickle.dump(param_dict, f)
def set_condition_flag(self):
"""
Checks if the DB entries meet the physical conditions
"""
self.cursor.execute("""SELECT simulation_id from simulations""")
ids = [id[0] for id in self.cursor.fetchall()]
for id in ids:
print("setting conditions flag for id:", id)
self.cursor.execute(f"""SELECT wavelength_start, periode FROM simulations
WHERE simulation_id = {id}""")
wav , period = self.cursor.fetchone()
#check the condition
if wav > 1.4 * period/1000:
flag = 1
print("true")
else:
flag = 0
#set the flag
self.cursor.execute(f"""UPDATE simulations
SET meets_conditions = {flag}
WHERE simulation_id = {id}""")
#%%
if __name__ == '__main__':
#create a crawler object
conn = sqlite3.connect('NN_smats.db')
cursor = conn.cursor()
crawler = Crawler(directory='collected_mats', cursor=cursor)
crawler.set_condition_flag()
conn.commit()
cursor.execute("""SELECT simulation_id FROM simulations
WHERE geometry='wire'""")
ids = [id[0] for id in cursor.fetchall()]
crawler.convert_to_npy(ids)
|
/sasa_db-0.2.tar.gz/sasa_db-0.2/sasa_db/crawler.py
| 0.511717 | 0.259937 |
crawler.py
|
pypi
|
import numpy as np
def star_product_analyt(SIN_1,SIN_2):
"""
Calculate Lifeng Li's starproduct for two S-matrices SIN_1 and SIN_2,
such that S = S1 * S2. The starproduct between two arbitrary S-matrices
was precalculated analytically with Mathematica.
Parameters
----------
SIN_1 : HxLx4x4 numpy array
H is height_vec_len, the dimension of the height vector
given to the layer object. (Most of the time equal to 1)
L is wav_vec_len the number of measured wavelengths
SIN_2 : HxLx4x4 numpy array
H is height_vec_len, the dimension of the height vector
given to the layer object. (Most of the time equal to 1)
L is wav_vec_len the number of measured wavelengths
Returns
-------
s_out : HxLx4x4 numpy array
"""
height_vec_len = max(SIN_1.shape[0], SIN_2.shape[0])
# S-matrix 1
TF_1 = SIN_1[:,:,0:2,0:2]
TB_1 = SIN_1[:,:,2:4,2:4]
RF_1 = SIN_1[:,:,2:4,0:2]
RB_1 = SIN_1[:,:,0:2,2:4]
# S-matrix 2
TF_2 = SIN_2[:,:,0:2,0:2]
TB_2 = SIN_2[:,:,2:4,2:4]
RF_2 = SIN_2[:,:,2:4,0:2]
RB_2 = SIN_2[:,:,0:2,2:4]
# number of wavelengths
wav_vec_len = TF_1.shape[1]
# declare output matrix
s_out = np.zeros((height_vec_len,wav_vec_len,4,4)).astype(complex)
left_kernel = np.linalg.inv(np.eye(2) - RB_1 @ RF_2)
right_kernel = np.linalg.inv(np.eye(2) - RF_2 @ RB_1)
TF = TF_2 @ left_kernel @ TF_1
TB = TB_1 @ right_kernel @ TB_2
RF = RF_1 + TB_1 @ RF_2 @ left_kernel @ TF_1
RB = RB_2 + TF_2 @ RB_1 @ right_kernel @ TB_2
# Assemble the resulting s-matrix using the elements from above
s_out[:,:,0:2,0:2] = TF
s_out[:,:,2:4,2:4] = TB
s_out[:,:,2:4,0:2] = RF
s_out[:,:,0:2,2:4] = RB
return s_out
def star_product_geometric(SIN_1, SIN_2, order):
"""
A version of star_product where the [I - a @ b]**-1 term is developed as
a geometric series to the nth order.
Parameters
----------
SIN_1: HxLx4x4 numpy array
H is height_vec_len, the dimension of the height vector
given to the layer object. (Most of the time equal to 1)
L is wav_vec_len the number of measured wavelengths
SIN_2: HxLx4x4 numpy array
H is height_vec_len, the dimension of the height vector
given to the layer object. (Most of the time equal to 1)
L is wav_vec_len the number of measured wavelengths
order : int
Returns
-------
s_out : HxLx4x4 numpy array
"""
TF_1 = SIN_1[:,:,0:2,0:2]
TB_1 = SIN_1[:,:,2:4,2:4]
RF_1 = SIN_1[:,:,2:4,0:2]
RB_1 = SIN_1[:,:,0:2,2:4]
TF_2 = SIN_2[:,:,0:2,0:2]
TB_2 = SIN_2[:,:,2:4,2:4]
RF_2 = SIN_2[:,:,2:4,0:2]
RB_2 = SIN_2[:,:,0:2,2:4]
height_vec_len = max(SIN_1.shape[0], SIN_2.shape[0])
wav_vec_len = TF_1.shape[1]
left_kernel = np.zeros((height_vec_len, wav_vec_len, 2, 2)).astype(complex)
right_kernel = np.zeros((height_vec_len, wav_vec_len, 2, 2)).astype(complex)
for n in range(1,order+1):
left_kernel = left_kernel + np.linalg.matrix_power(RB_1 @ RF_2, n)
right_kernel = right_kernel + np.linalg.matrix_power(RF_2 @ RB_1, n)
TF = TF_2 @ TF_1 + TF_2 @ left_kernel @ TF_1
TB = TB_1 @ TB_2 + TB_1 @ right_kernel @ TB_2
RF = RF_1 + TB_1 @ RF_2 @ TF_1 + TB_1 @ RF_2 @ left_kernel @ TF_1
RB = RB_2 + TF_2 @ RB_1 @ TB_2 + TF_2 @ RB_1 @ right_kernel @ TB_2
s_out = np.zeros((height_vec_len, wav_vec_len, 4, 4)).astype(complex)
s_out[:,:,0:2,0:2] = TF
s_out[:,:,2:4,2:4] = TB
s_out[:,:,2:4,0:2] = RF
s_out[:,:,0:2,2:4] = RB
return s_out
def star_product_cascaded(smat_list):
"""
Iteratively calculates the starproduct (Li, 1996) of N S-matrices, where
N >= 2. The iteration goes the through the starproduct pair-wise, so
that: S = ((((((S1 * S2) * S3) * S4) * ... ) * Sn-1) * Sn).
Parameters
----------
smat_list : list
A list containing N HxLx4x4 S-matrices
Returns
-------
smat : HxLx4x4 numpy array
"""
if not type(smat_list) is list:
raise TypeError("Input has to be a list")
elif len(smat_list) <= 1:
raise ValueError("List has to be length 2 or larger")
smat = smat_list[0]
for i in range(1, len(smat_list)):
smat = star_product_analyt(smat, smat_list[i])
return smat
def star_product_cascaded_geo(smat_list, order):
"""
A version of star_product_cascaded using star_product_geometric.
Parameters
----------
smat_list : list
A list containing N HxLx4x4 S-matrices
order : int
Returns
-------
smat : HxLx4x4 numpy array
"""
if not type(smat_list) is list:
raise TypeError("Input has to be a list")
elif len(smat_list) <= 1:
raise ValueError("List has to be length 2 or larger")
smat = smat_list[0]
for i in range(1, len(smat_list)):
smat = star_product_geometric(smat, smat_list[i], order)
return smat
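# Quick sanity check (illustrative): an S-matrix with identity transmission
# blocks and zero reflection is the unit element of the star product.
#
#   import numpy as np
#   s_id = np.zeros((1, 1, 4, 4), dtype=complex)
#   s_id[:, :, 0:2, 0:2] = np.eye(2)  # TF = I
#   s_id[:, :, 2:4, 2:4] = np.eye(2)  # TB = I
#   assert np.allclose(star_product_analyt(s_id, s_id), s_id)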
|
/sasa_phys-0.1.tar.gz/sasa_phys-0.1/sasa_phys/star_product.py
| 0.861203 | 0.689763 |
star_product.py
|
pypi
|
import numpy as np
from .star_product import *
from .smat_oparations import *
class Layer:
"""
Parent class of MetaLayer and NonMetaLayer; contains information about
which symmetry operations will be applied.
"""
def __init__(self):
self.mirror_bool = False
self.flip_bool = False
self.angle = 0
def flip(self):
self.flip_bool = True
return
def mirror(self):
self.mirror_bool = True
def rotate(self, angle):
self.angle = angle
class MetaLayer(Layer):
"""
Class to describe a Meta-Surface in the Stack.
Parameters
----------
s_mat : L x 4 x 4 numpy Array
the Lx4x4 S-Matrix of the Meta-Layer, externally simulated/measured
cladding : vector
containing the refraction indices of the cladding.
substrate : vector
containing the refraction indices of the substrate.
"""
def __init__(self, s_mat, cladding, substrate):
Layer.__init__(self)
self.s_mat = s_mat
self.cladding = cladding
self.substrate = substrate
class NonMetaLayer(Layer):
"""
Class to describe a homogeneous isotropic or anisotropic Layer.
Parameters
----------
height : height in (μm)
n_vec : one or two vectors containing the refractive indices.
If only one vector is given, isotropic behavior will be assumed.
"""
def __init__(self, *n_vec, height):
Layer.__init__(self)
self.height = height
self.height_len = np.size(self.height)
self.n_x = n_vec[0]
# isotropic material
if len(n_vec) == 1:
self.n_y = self.n_x
# anisotropic material
elif len(n_vec) == 2:
self.n_y = n_vec[1]
else:
raise ValueError("input 1 or 2 refrectiv index vectors")
class Stack:
"""
Class to describe the whole Stack, contains information about the layers,
cladding, substrate and further options.
Parameters
----------
layer_list : list of Layer objects
wav_vec : vector
The target wavelengths where the Meta-Surfaces were simulated/
measured
cladding : vector
The refractive indices of the cladding.
substrate : vector
The refractive indices of the substrate. The first material to be
hit by light.
"""
def __init__(self, layer_list, wav_vec, cladding, substrate):
self.layer_list = layer_list
self.cladding = cladding
self.substrate = substrate
self.wav_vec = wav_vec
self.wav_vec_len = len(self.wav_vec)
self.geo_bool = False
self.geo_order = 5
def create_propagator(self, layer):
"""
Creates the propagator S-Matrix
Parameters
----------
layer : NonMetaLayer or MetaLayer object
Returns
-------
s_mat : H x L x 4 x 4 numpy array
propagation S-Matrix
"""
if type(layer) is NonMetaLayer:
s_mat = np.zeros((layer.height_len, self.wav_vec_len, 4, 4)).astype(complex)
prop_x = np.exp(2j*np.pi * np.outer(layer.height, layer.n_x/self.wav_vec).squeeze())
prop_y = np.exp(2j*np.pi * np.outer(layer.height, layer.n_y/self.wav_vec).squeeze())
s_mat[:, :, 0, 0] = prop_x
s_mat[:, :, 1, 1] = prop_y
s_mat[:, :, 2, 2] = prop_x
s_mat[:, :, 3, 3] = prop_y
elif type(layer) is MetaLayer:
s_mat = layer.s_mat.reshape((1, self.wav_vec_len, 4, 4))
else:
raise ValueError("Stack has to consist of Mata and \
NonMetaLayers")
# apply symmetry operations
if layer.mirror_bool:
s_mat = mirror_smat(s_mat)
if layer.flip_bool:
s_mat = flip_smat(s_mat)
if layer.angle != 0:
s_mat = rot_smat(s_mat, layer.angle)
return s_mat
def create_interface(self, l_2, l_1):
"""
Creates the interface S-Matrix for the transmission between two Layers
Parameters
----------
l_1 : NonMetaLayer or MetaLayer Objects
l_2 : NonMetaLayer or MetaLayer Objects
Returns
-------
s_mat : L x 4 x 4 numpy array
interface S-Matrix
"""
# load n_* from the Layers
if (type(l_1) is NonMetaLayer):
n1_x = l_1.n_x
n1_y = l_1.n_y
else:
n1_x = l_1.cladding
n1_y = l_1.cladding
if(type(l_2) is NonMetaLayer):
n2_x = l_2.n_x
n2_y = l_2.n_y
else:
n2_x = l_2.substrate
n2_y = l_2.substrate
# transmission and reflection in x and y direction
s_mat_list = np.zeros((self.wav_vec_len, 4, 4)).astype(complex)
# Transmission
s_mat_list[:, 0, 0] = 2*n1_x/(n1_x + n2_x)
s_mat_list[:, 1, 1] = 2*n1_y/(n1_y + n2_y)
s_mat_list[:, 2, 2] = 2*n2_x/(n1_x + n2_x)
s_mat_list[:, 3, 3] = 2*n2_y/(n1_y + n2_y)
# Reflection
R_x = (n1_x - n2_x)/(n1_x + n2_x)
R_y = (n1_y - n2_y)/(n1_y + n2_y)
s_mat_list[:, 0, 2] = R_x
s_mat_list[:, 1, 3] = R_y
s_mat_list[:, 2, 0] = -1*R_x
s_mat_list[:, 3, 1] = -1*R_y
"""
This operator is constructed:
[T_x , 0 , R_x, 0],
[ 0 , T_y , 0, R_y],
[-1*R_x, 0 , T_x, 0 ],
[ 0 ,-1*R_y, 0 , T_y ]
"""
return s_mat_list.reshape((1, self.wav_vec_len, 4, 4))
def create_interface_rot(self, l_2, l_1):
"""
Creates the interface S-Matrix for the transmission between
two Layers in case of rotation, uses create_interface
Parameters
----------
l_1 : NonMetaLayer or MetaLayer Objects
l_2 : NonMetaLayer or MetaLayer Objects
Returns
-------
s_mat : Lx4x4 S-Matrix
"""
vacuum_layer = NonMetaLayer(np.ones(self.wav_vec_len), height=None)
s_mat1 = self.create_interface(vacuum_layer, l_2)
s_mat2 = self.create_interface(l_1, vacuum_layer)
s_mat = star_product_analyt(rot_smat(s_mat1, l_2.angle),
rot_smat(s_mat2, l_1.angle))
return s_mat
def build(self):
"""
Builds all the propagation and interface matrices and multiplies them.
Returns
-------
s_mat : Lx4x4 or HxLx4x4 numpy array
S-matrix describing the behavior of the whole stack. The
dimension is HxLx4x4 when a height vector was given
"""
# Create Layer-Objects for the cladding and substrate
clad_layer = NonMetaLayer(self.cladding, height=None)
subs_layer = NonMetaLayer(self.substrate, height=None)
# add the substrate layer to the back
self.layer_list.append(subs_layer)
# create interface between the cladding and the first layer
inter = self.create_interface(clad_layer, self.layer_list[0])
s_mat_list = [inter]
for i in range(len(self.layer_list) - 1):
current_layer = self.layer_list[i]
next_layer = self.layer_list[i+1]
prop = self.create_propagator(current_layer)
# This can be further optimized by a better differentiation between
# the cases
if (current_layer.angle != 0) or (next_layer.angle != 0):
inter = self.create_interface_rot(current_layer, next_layer)
else:
inter = self.create_interface(current_layer, next_layer)
s_mat_list.append(prop)
s_mat_list.append(inter)
# end building loop
if self.geo_bool:
s_out = star_product_cascaded_geo(s_mat_list, self.geo_order).squeeze()
else:
s_out = star_product_cascaded(s_mat_list).squeeze()
# remove subs_layer from the layer list
del self.layer_list[-1]
return s_out
def build_geo(self, order):
"""
A version of build() that develops the star product as a geometric
series of the given order, using star_product_cascaded_geo().
Returns
-------
s_mat : Lx4x4 or HxLx4x4 numpy array
S-matrix describing the behavior of the whole stack. The
dimension is HxLx4x4 when a height vector was given
"""
self.geo_order = order
self.geo_bool = True
s_mat = self.build()
self.geo_bool = False
return s_mat
def order(self, order):
"""
Returns the nth order S-Matrix of the star product developed via the
geometric series.
Parameters
----------
order : int
Returns
-------
s_out : H x L x 4 x 4 numpy Array
S-Matrix term of the given order in the series development
"""
self.geo_bool = True
previous_smat = 0
if order > 1:
# calculate previous S-matrix
self.geo_order = order - 1
previous_smat = self.build()
# calculate current S-matrix
self.geo_order = order
current_smat = self.build()
s_out = current_smat - previous_smat
self.geo_bool = False
return s_out
def order_up_to(self, order):
"""
Builds a list of S-matrices up to the target order.
Parameters
----------
order : int
Returns
-------
s_list : list of HxLx4x4 numpy Arrays
"""
"""
currently can't get this working; using the straightforward approach
below instead. The optimization may be unnecessary:
s_list = []
self.geo_bool = True
self.order = order
previous_order = self.build()
for i in range(order-1, 0, -1):
self.order = i
print(i,end=":")
current_order = self.build()
print(previous_order[0,0,0,0])
print(current_order[0,0,0,0])
s_list.insert(0, previous_order - current_order)
previous_order = current_order
self.geo_bool = False
return s_list
"""
s_list = []
self.geo_bool = True
for i in range(1, order+1):
s_list.append(self.order(i))
return s_list
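# Usage sketch (illustrative refractive indices and heights, in the units
# the docstrings assume):
#
#   import numpy as np
#   wav = np.linspace(0.5, 1.0, 11)
#   ones = np.ones_like(wav)
#   layer1 = NonMetaLayer(1.5 * ones, height=0.2)
#   layer2 = NonMetaLayer(2.0 * ones, height=0.1)
#   stack = Stack([layer1, layer2], wav, cladding=ones, substrate=1.45 * ones)
#   smat = stack.build()  # Lx4x4 after squeeze, since height is scalar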
|
/sasa_phys-0.1.tar.gz/sasa_phys-0.1/sasa_phys/stack.py
| 0.85496 | 0.546133 |
stack.py
|
pypi
|
import numpy as np
def mirror_smat(s_mat):
"""
Mirror a given S-Matrix
Parameters
----------
s_mat: L x 4 x 4 numpy Array
S-Matrix
Returns
-------
s_out: L x 4 x 4 numpy Array
mirrored S-Matrix
"""
mask = np.array([[ 1, -1,  1, -1],
[-1,  1, -1,  1],
[ 1, -1,  1, -1],
[-1,  1, -1,  1]])
s_out = s_mat * mask
return s_out
def flip_smat(s_mat):
"""
Flip a given S-Matrix
Parameters
----------
s_mat: L x 4 x 4 numpy Array
S-Matrix
Returns
-------
s_out: L x 4 x 4 numpy Array
flipped S-Matrix
"""
s_out = np.block([
[s_mat[:, 2:4, 2:4], s_mat[:, 2:4, 0:2]],
[s_mat[:, 0:2, 2:4], s_mat[:, 0:2, 0:2]]])
s_out = mirror_smat(s_out)
return s_out
def rot_smat(s_mat,ang):
"""
Rotate a given S-Matrix by a given angle
Parameters
----------
s_mat : Lx4x4 Array
ang : float
rotation angle in degrees
Returns
-------
s_out: Lx4x4 Array
rotated S-Matrix
"""
# convert rotation angle from degrees to rad
phi = ang * np.pi/180
#number of wavelengths
numel_wav = s_mat.shape[0]
# Determine 2x2 rotation matrix to be applied on the matrix blocks
R = np.array([[np.cos(phi), -np.sin(phi) ],
[np.sin(phi), np.cos(phi) ] ])
#Define right sided rotation operator of size 4x4
Rot_op = np.block([ [R, np.zeros((2,2))],
[np.zeros((2,2)), R]])
#rotate SMAT
s_out = Rot_op.T @ s_mat @ Rot_op
return s_out
def phase_shift(smat,ang):
"""
Shifting the phase of a given S-Matrix by a given angle
Parameters
----------
s_mat: L x 4 x 4 numpy Array
S-Matrix
ang: float
phase shift in rad
Returns
-------
s_out: L x 4 x 4 numpy Array
shifted S-Matrix
"""
smat_arg = np.angle(smat)
smat_abs = np.abs(smat)
s_out = smat_abs*np.exp(1j*(smat_arg+ang))
return s_out
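# Quick check (illustrative): a full 360-degree rotation leaves an S-matrix
# unchanged, up to floating-point error:
#
#   import numpy as np
#   s = np.random.rand(5, 4, 4) + 1j * np.random.rand(5, 4, 4)
#   assert np.allclose(rot_smat(s, 360), s)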
|
/sasa_phys-0.1.tar.gz/sasa_phys-0.1/sasa_phys/smat_oparations.py
| 0.908176 | 0.800458 |
smat_oparations.py
|
pypi
|
from datetime import date, datetime, timedelta
def period_current():
return int(date.today().strftime('%Y%m'))
def period_previous(period = None):
if period is None:
period = period_current()
previous_period = period - 1
if len(str(period)) == 6 and int(str(previous_period)[4:]) == 0:
previous_period = previous_period - 100 + 12
return previous_period
def period_next(period = None):
if period is None:
period = period_current()
next_period = period + 1
if len(str(period)) == 6 and int(str(next_period)[4:]) == 13:
next_period = next_period + 100 - 12
return next_period
def period_range(period_from, period_to = None):
if period_to is None:
period_to = period_current()
periods = []
period = period_from
while period <= period_to :
periods.append(period)
period = period_next(period)
return periods
def period_first_day(period):
return str(period)[0:4]+'-'+str(period)[4:]+'-01'
def period_last_day(period):
first_day_next_period = period_first_day(period_next(period))
first_day_next_period_object = datetime.strptime(first_day_next_period, '%Y-%m-%d')
last_day_object = first_day_next_period_object - timedelta(days=1)
return last_day_object.strftime('%Y-%m-%d')
def period_valid(period, year_from = 2000, year_to = 2100):
try:
int(period)
except ValueError:
return False
period = str(period)
n_period = len(period)
if n_period == 4:
period = int(period)
return period >= year_from and period <= year_to
elif n_period == 6:
year = int(period[0:4])
month = int(period[4:])
return year >= year_from and year <= year_to and month >= 1 and month <= 12
else:
return False
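# Examples of period arithmetic on YYYYMM integers:
#
#   period_previous(202301)        # -> 202212
#   period_next(202312)            # -> 202401
#   period_range(202211, 202302)   # -> [202211, 202212, 202301, 202302]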
def date_normalize(date, format_from = None, format_to = None):
if format_from is None:
aux = date.split(' ')
if len(aux) == 2:
if len(aux[1]) == 5:
format_from = '%d/%m/%Y %H:%M'
format_to = '%Y-%m-%d %H:%M'
else:
format_from = '%d/%m/%Y %H:%M:%S'
format_to = '%Y-%m-%d %H:%M:%S'
else:
format_from = '%d/%m/%Y'
format_to = '%Y-%m-%d'
date_obj = datetime.strptime(date, format_from)
return date_obj.strftime(format_to)
def date_add_days(days, from_date = None):
if from_date is None:
from_date = str(date.today().strftime('%Y-%m-%d'))
y, m, d = from_date.split('-')
specific_date = datetime(int(y), int(m), int(d))
new_date = specific_date + timedelta(days = days)
return str(new_date).split(' ')[0]
def date_count_days(date_from, date_to = None):
if date_to is None:
date_to = str(date.today().strftime('%Y-%m-%d'))
y_from, m_from, d_from = str(date_from).split(' ')[0].split('-')
y_to, m_to, d_to = str(date_to).split(' ')[0].split('-')
delta = date(int(y_to), int(m_to), int(d_to)) - date(int(y_from), int(m_from), int(d_from))
return delta.days if delta.days >= 0 else delta.days * -1
def month_from_string(month):
month_lower = month.lower()
months = {
'enero': '01',
'febrero': '02',
'marzo': '03',
'abril': '04',
'mayo': '05',
'junio': '06',
'julio': '07',
'agosto': '08',
'septiembre': '09',
'octubre': '10',
'noviembre': '11',
'diciembre': '12',
}
return months[month_lower] if month_lower in months else month
def month_from_index(index):
index = int(index) % 12
months = [
'enero',
'febrero',
'marzo',
'abril',
'mayo',
'junio',
'julio',
'agosto',
'septiembre',
'octubre',
'noviembre',
'diciembre',
]
return months[index]
def month_from_number(month):
return month_from_index(int(month)-1)
|
/sasco_utils-0.0.4.tar.gz/sasco_utils-0.0.4/sasco_utils/datetime.py
| 0.510252 | 0.583085 |
datetime.py
|
pypi
|
Unreleased
----------
**Improvements**
- Refactor `tasks.py` to utilize `sasctl.pzmm` functions.
- Add `model_info` class to better capture model information.
- Test `/examples` Jupyter notebooks within normal test suite.
v1.10 (2023-08-31)
------------------
**Improvements**
- `write_score_code.py` refactored to include the ability to run batch scoring.
- Added handling for TensorFlow Keras models.
- Updated project creation to automatically set project properties based on contained models.
- Included capability to assess biases of a model using CAS FairAITools using `pzmm.write_json_files.assess_model_bias()`.
- Added custom KPI support for H2O, statsmodels, TensorFlow, and xgboost.
- Updated examples:
- Added example walking through the creation process of a simple TensorFlow Keras model.
  - Added example detailing the usage of `pzmm.write_json_files.assess_model_bias()` for a simple regression model.
- Updated `pzmm_custom_kpi_model_parameters` notebook to have correct parameter casing.
v1.9.4 (2023-06-15)
-------------------
**Improvements**
- Created pytest fixture to begin running Jupyter notebooks within the GitHub automated test actions.
- Updated examples:
- Custom KPI and model parameters example now checks for the performance job's status.
- Update H2O example to show model being published and scored using the "maslocal" destination.
- Updated models to be more realistic for `pzmm_binary_classification_model_import.ipynb`.
**Bugfixes**
- Adjust `pzmm.ScoreCode.write_score_code()` function to be compatible with future versions of pandas.
- Reworked H2O section of `pzmm.ScoreCode.write_score_code()` to properly call H2OFrame values.
- Fixed call to `pzmm.JSONFiles.calculate_model_statistics()` in `pzmm_binary_classification_model_import.ipynb`.
v1.9.3 (2023-06-08)
-------------------
**Improvements**
- Refactored gitIntegration.py to `git_integration.py` and added unit tests for better test coverage.
**Bugfixes**
- Fixed issue with ROC and Lift charts not properly being written to disk.
- Fixed JSON conversion for Lift charts that caused TRAIN and TEST charts to be incorrect.
- Fixed issue with H2O score code and number of curly brackets.
- Updated score code logic for H2O to account for incompatibility with Path objects.
- Fixed issue where inputVar.json could supply invalid values to SAS Model Manager upon model import.
- Fixed issue with `services.model_publish.list_models`, which was using an older API format that is not valid in SAS Viya 3.5 or SAS Viya 4.
v1.9.2 (2023-05-17)
-------------------
**Improvements**
- Add recursive folder creation and an example.
- Add example for migrating models from SAS Viya 3.5 to SAS Viya 4.
**Bugfixes**
- Fixed improper json encoding for `pzmm_h2o_model_import.ipynb` example.
- Set urllib3 < 2.0.0 to allow requests to update their dependencies.
- Set pandas >= 0.24.0 to include df.to_list alias for df.tolist.
- Fix minor errors in h2o score code generation
v1.9.1 (2023-05-04)
-------------------
**Improvements**
- Updated handling of H2O models in `sasctl.pzmm`.
- Models are now saved with the appropriate `h2o` functions within the `sasctl.pzmm.PickleModel.pickle_trained_model` function.
- Example notebooks have been updated to reflect this change.
**Bugfixes**
- Added check for `sasctl.pzmm.JSONFiles.calculate_model_statistics` function to replace float NaN values, which are invalid in JSON files.
- Fixed issue where the `sasctl.pzmm.JSONFiles.write_model_properties` function was replacing the user-defined model_function argument.
- Added NpEncoder class to check for numpy values in JSON files. Numpy-types cannot be used in SAS Viya.
v1.9.0 (2023-04-04)
-------------------
**Improvements**
- `sasctl.pzmm` refactored to follow PEP8 standards, include type hinting, and major expansion of code coverage.
- `sasctl.pzmm` functions that can generate files can now run in-memory instead of writing to disk.
- Added custom KPI handling via `pzmm.model_parameters`, allowing users to interact with the KPI table generated by model performance via API.
- Added a method for scikit-learn models to generate hyperparameters as custom KPIs.
- Reworked the `pzmm.write_score_code()` logic to appropriately write score code for binary classification, multi-class classification, and regression models.
- Updated all examples based on `sasctl.pzmm` usage and model assets.
- Examples from older versions moved to `examples/ARCHIVE/vX.X`.
- DataStep or ASTORE models can include additional files when running `tasks.register_model()`.
**Bugfixes**
- Fixed an issue where invalid HTTP responses could cause an error when using `Session.version_info()`.
v1.8.2 (2023-01-30)
-------------------
**Improvements**
- `folders.get_folder()` can now handle folder paths and delegates (e.g. @public).
**Bugfixes**
- Fixed an issue with `model_management.execute_model_workflow_definition()` where input values for
workflow prompts were not correctly submitted. Note that the `input=` parameter was renamed to
`prompts=` to avoid conflicting with the built-in `input()`.
- Fixed an issue with `pzmm.importModel.model_exists()` where project versions were incorrectly
compared, resulting in improper behavior when the project version already existed.
- Better handling for invalid project versions included.
v1.8.1 (2023-01-19)
-------------------
**Changes**
- Adjusted workflow for code coverage reporting. Prepped to add components in next release.
- Added `generate_requirements_json.ipynb` example.
**Bugfixes**
- Fixed improper math.fabs use in `sasctl.pzmm.writeJSONFiles.calculateFitStat()`.
- Fixed incorrect ast node walk for module collection in `sasctl.pzmm.writeJSONFiles.create_requirements_json()`.
v1.8.0 (2022-12-19)
-------------------
**Improvements**
- Added `Session.version_info()` to check which version of Viya the session is connected to.
- Updated the `properties=` parameter of `model_repository.create_model()` to accept a dictionary containing
custom property names and values, and to correctly indicate their type (numeric, string, date, datetime) when
passing the values to Viya.
- Added `services.saslogon` for creating and removing OAuth clients.
- Added `pzmm.JSONFiles.create_requirements_json()` to create the requirements.json file for model deployment
to containers based on the user's model assets and Python environment.
**Changes**
- Deprecated `core.platform_version()` in favor of `Session.version_info()`.
- A `RuntimeError` is now raised if an obsolete service is called on a Viya 4 session (sentiment_analysis,
  text_categorization, and text_parsing).
- Replaced the JSON cassettes used for testing with compressed binary cassettes to save space.
- Updated the testing framework to allow regression testing of multiple Viya versions.
- Refactored the authentication functionality in `Session` to be more clear and less error prone. Relevant
functions were also made private to reduce clutter in the class's public interface.
- Began refactor for `sasctl.pzmm` to adhere to PEP8 guidelines and have better code coverage.
**Bugfixes**
- Fixed an issue with `register_model()` that caused invalid SAS score code to be generated when registering an
ASTORE model in Viya 3.5.
- Fixed a bug where calling a "get_item()" function and passing `None` would throw an error on most services instead
of returning `None`.
- Fixed a bug that caused the authentication flow to be interrupted if Kerberos was missing.
v1.7.3 (2022-09-20)
-------------------
**Improvements**
- Refactor astore model upload to fix 422 response from SAS Viya 4
- ASTORE model import now uses SAS Viya to generate ASTORE model assets
- Expanded usage for cas_management service (credit to @SilvestriStefano)
**Bugfixes**
- ASTORE model import no longer returns a 422 error
- Fix improper filter usage for model_repository service
- Fix error with loss of stream in add_model_content call for duplicate content
- Update integration test cassettes for SAS Viya 4
v1.7.2 (2022-06-16)
-------------------
**Improvements**
- Added a new example notebook for git integration
- Added a model migration tool for migrating Python models from Viya 3.5 to Viya 4
- Improved handling of CAS authentication with tokens
**Bugfixes**
- Fixed git integration failure caused by detached head
- Fixed minor bugs in score code generation feature
- Fixed 500 error when importing models to Viya 4 with prewritten score code
- Fixed incorrect handling of optional packages in pzmm
v1.7.1 (2022-04-19)
-------------------
**Bugfixes**
- Removed linux breaking import from new git integration feature
- Various minor bug fixes in the git integration feature
v1.7.0 (2022-04-07)
-------------------
**Improvements**
- Added Git integration for better tracking of model history and versioning.
- Added MLFlow integration for simple models, allowing users to import simple MLFlow models, such as sci-kit
learn, to SAS Model Manager
v1.6.4 (2022-04-07)
-------------------
**Bugfixes**
- Fixed an issue where `folders.create_folder()` would attempt to use root folder as parent if desired parent
folder wasn't found. Now correctly handles parent folders and raises an error if folder not found.
v1.6.3 (2021-09-23)
-------------------
**Bugfixes**
- Fix an issue where `pzmm.ZipModel.zipFiles()` threw an error on Python 3.6.1 and earlier.
v1.6.2 (2021-09-09)
-------------------
**Bugfixes**
- Fixed an issue with `register_model()` where random forest, gradient boosting, and SVM regression models with
  nominal inputs were incorrectly treated as classification models.
v1.6.1 (2021-09-01)
-------------------
**Improvements**
- `model_repository.add_model_content()` will now overwrite existing files instead of failing.
**Bugfixes**
- `PagedList.__repr__()` no longer appears to be an empty list.
v1.6.0 (2021-06-29)
-------------------
**Improvements**
- `Session` now supports authorization using OAuth2 tokens. Use the `token=` parameter in the constructor when
  an existing access token is known. Alternatively, omitting the `username=` and `password=` parameters
will now prompt the user for an auth code.
**Changes**
- `current_session` now stores & returns the *most recently created* session, not the first created session. This
was done to alleviate quirks where an old, expired session is implicitly used instead of a newly-created session.
- Removed deprecated `raw=` parameter from `sasctl.core.request()`.
- Dropped support for Python 2.
v1.5.9 (2021-06-09)
-------------------
**Bugfixes**
- Fixed an issue that caused score code generation by `pzmm` module to fail with Viya 3.5.
v1.5.8 (2021-05-18)
-------------------
**Bugfixes**
- SSL warnings no longer repeatedly raised when `verify_ssl=False` but `CAS_CLIENT_SSL_CA_LIST` is specified.
- `model_repository.delete_model_contents()` no longer fails when only one file is found.
**Improvements**
- All `delete_*()` service methods return `None` instead of empty string.
- All `get_*()` service methods issue a warning if multiple items are found when retrieving by name.
v1.5.7 (2021-05-04)
-------------------
**Bugfixes**
- Fixed an import issue that could cause an error while using the `pzmm` submodule.
v1.5.6 (2021-04-30)
-------------------
**Improvements**
- `PagedList` handles situations where the server over-estimates the number of items available for paging.
- The version of SAS Viya on the server can now be determined using `sasctl.platform_version()`.
**Bugfixes**
- Reworked the `model_repository.get_repository()` to prevent HTTP 403 errors that could occur with some Viya environments.
v1.5.5 (2021-03-26)
-------------------
**Bugfixes**
- Fixed an issue with JSON parsing that caused the `publish_model` task to fail with Viya 4.0.
v1.5.4 (2020-10-29)
------------------
**Improvements**
- Added the `as_swat` method to the `Session` object, allowing connection to CAS through SWAT without an additional authentication step.
**Changes**
- Integrated PZMM into `Session` calls and removed redundant function calls in PZMM.
- ROC and Lift statistic JSON files created by PZMM are now generated through CAS actionset calls.
- Updated the PZMM example notebook, `FleetMaintenance.ipynb`, to include integration of PZMM with sasctl functions.
**Bugfixes**
- Reworked the `model_repository.get_repository()` to prevent HTTP 403 errors that could occur with some Viya environments.
v1.5.3 (2020-06-25)
------------------
**Bugfixes**
- Added PZMM fitstat JSON file to manifest.
v1.5.2 (2020-06-22)
-------------------
**Improvements**
- PZMM module moved from a stand-alone [repository](https://github.com/sassoftware/open-model-manager-resources/tree/master/addons/picklezip-mm) to a sasctl submodule.
- Introduced deprecation warnings for Python 2 users.
v1.5.1 (2020-4-9)
----------------
**Bugfixes**
- Fixed PyMAS utilities to work correctly with functions not bound to pickled objects.
- Model target variables should no longer appear as an input variable when registering ASTORE models.
v1.5 (2020-2-23)
----------------
**Improvements**
- Registered Python models will now include both `predict` and `predict_proba` methods.
- Added a new Relationships service for managing links between objects.
- Added a new Reports service for retrieving SAS Visual Analytics reports.
- Added a new Report_Images service for rendering content from reports.
- Additional metadata fields are set when registering an ASTORE model.
- Collections of items should now return an instance of `PagedList` for lazy loading of results.
- Module steps can now be called using `module.step(df)` where `df` is a row of a DataFrame or a Numpy array.
- `register_model` sets additional project properties when registering an ASTORE model.
**Changes**
- Replaced the `raw` parameter of the `request` methods with a `format` parameter, allowing more control over the
returned value.
- The `get_file_content` method of the Files service now returns the actual content instead of the file metadata.
- JSON output when using `sasctl` from the command line is now formatted correctly.
**Bugfixes**
- `model_publish.delete_destination` now works correctly.
v1.4.6 (2020-1-24)
------------------
**Bugfixes**
- Fixed an issue where the `REQUESTS_CA_BUNDLE` environment variable was taking precedence over the `verify_ssl` parameter.
v1.4.5 (2019-12-5)
------------------
**Changes**
- Saving of package information can now be disabled using the `record_packages` parameter of `register_model`.
**Bugfixes**
- Added support for uint data types to the `register_model` task.
- Fixed an issue where long package names caused `register_model` to fail.
- `Session` creation now works with older versions of urllib3.
v1.4.4 (2019-10-31)
-------------------
**Bugfixes**
- Match performance definitions based on project instead of model.
v1.4.3 (2019-10-28)
-------------------
**Bugfixes**
- Model versioning now works correctly for Python models
- Fixed an issue where `None` values in Python caused issues with MAS models.
v1.4.2 (2019-10-23)
-------------------
**Bugfixes**
- Fixed project properties when registering a model from ASTORE.
- Fixed model metadata when registering a datastep model.
v1.4.1 (2019-10-17)
-------------------
**Bugfixes**
- Fixed an issue where string inputs to Python models were incorrectly handled by DS2.
v1.4 (2019-10-15)
-----------------
**Changes**
- `PyMAS.score_code` now supports a `dest='Python'` option to retrieve the generated Python wrapper code.
- `register_model` task includes a `python_wrapper.py` file when registering a Python model.
- Improved error message when user lacks required permissions to register a model.
**Bugfixes**
- Fixed an issue with CAS/EP score code that caused problems with model performance metrics.
v1.3 (2019-10-10)
-----------------
**Improvements**
- Added `update_performance` task for easily uploading performance information for a model.
- New (experimental) pyml2sas sub-package provides utilities for generating SAS code from Python gradient boosting models.
- New (experimental) methods for managing workflows added to `model_management` service.
**Changes**
- `register_model` task automatically captures installed Python packages.
- All `list_xxx` methods return all matching items unless a `limit` parameter is specified.
- Improved API documentation.
- Updated `full_lifecycle` example with performance monitoring.
v1.2.5 (2019-10-10)
-------------------
**Changes**
- Registering an ASTORE model now creates an empty ASTORE file in Model Manager to be consistent with Model Studio behavior.
**Bugfixes**
- `microanalytic_score.define_steps` now works with steps having no input parameters.
- Fixed an issue where score code generated from an ASTORE model lacked output variables.
v1.2.4 (2019-9-20)
------------------
**Bugfixes**
- `model_repository.get_model_contents` no longer raises an HTTP 406 error.
v1.2.3 (2019-8-23)
------------------
**Changes**
- `put` request will take an `item` parameter that's used to automatically populate headers for updates.
**Bugfixes**
- Convert NaN values to null (None) when calling `microanalytic_score.execute_module_step`.
v1.2.2 (2019-8-21)
------------------
**Bugfixes**
- `register_model` task should now correctly identify columns when registering a Sci-kit pipeline.
v1.2.1 (2019-8-20)
------------------
**Improvements**
- Added the ability for `register_model` to correctly handle CAS tables containing data step
score code.
v1.2.0 (2019-8-16)
------------------
**Improvements**
- Added `create_model_version` and `list_model_versions` to `model_repository`
- Added an explicit `ValueError` when attempting to register an ASTORE that can't be downloaded.
- Added `start` and `limit` pagination parameters to all default `list_*` service methods.
- Added `create_destination`, `create_cas_destination` and `create_mas_destination` methods for `model_publish` service.
**Changes**
- `Session.add_stderr_logger` default logging level changed to `DEBUG`.
**Bugfixes**
- Fixed an issue where `model_repository` did not find models, projects, or repositories by name once pagination limits were reached.
v1.1.4 (2019-8-16)
-----------------
**Bugfixes**
- The `register_model` task now generates dmcas_epscorecode.sas files for ASTORE models.
v1.1.3 (2019-8-14)
-----------------
**Bugfixes**
- Fixed problem causing `register_model` task to include output variables in the input variables list.
v1.1.2 (2019-8-12)
-----------------
**Improvements**
- CAS model table automatically reloaded on `publish_model` task.
**Bugfixes**
- Fixed DS2 score code for CAS that was generated when registering a Python model.
- `PyMAS.score_code(dest='ESP')` corrected to `dest='EP'`
- Fixed an issue where long user-defined properties prevented model registration.
v1.1.1 (2019-8-6)
-----------------
**Bugfixes**
- Fixed an issue where usernames were not parsed correctly from .authinfo files, resulting in failed logins.
v1.1.0 (2019-8-5)
-----------------
**Improvements**
- Added `update_module` and `delete_module` methods to MAS service.
**Changed**
- Added `replace` parameter to `sasctl.tasks.publish_model`
- `Session` hostnames can now be specified in HTTP format: 'http://example.com'.
**Bugfixes**
- Renamed `microanalytic_store` service to `microanalytic_score`
v1.0.1 (2019-07-31)
-------------------
**Changed**
- Exceptions moved from `sasctl.core` to `sasctl.exceptions`
- `SWATCASActionError` raised if ASTORE cannot be saved during model registration.
- Improved handling of MAS calls made via `define_steps()`
v1.0.0 (2019-07-24)
-------------------
**Changed**
- services are now classes instead of modules.
Imports of services in the format `import sasctl.services.model_management as mm` must be
changed to `from sasctl.services import model_management as mm`.
- `host` and `user` parameters of `Session` renamed to `hostname` and `username` to align with SWAT.
- Only `InsecureRequestWarning` is suppressed instead of all `HTTPWarning`.
**Improvements**
- Added `copy_analytic_store` method to `model_repository` service
- `AuthenticationError` returned instead of `HTTPError` if session authentication fails.
v0.9.7 (2019-07-18)
-------------------
**Improvements**
- publish_model task also defines methods mapped to MAS module steps when publishing to MAS.
- SSL verification can be disabled with the `SSLREQCERT` environment variable.
- CAs to use for validating SSL certificates can also be specified through the `SSLCALISTLOC` environment variable.
- Added `execute_performance_task`
**Changes**
- Updated method signature for `create_performance_definition` in Model Manager.
**Bugfixes**
- register_model task no longer adds `rc` and `msg` variables from MAS to the project variables.
v0.9.6 (2019-07-15)
-------------------
Initial public release.
|
/sasctl-1.10.0.tar.gz/sasctl-1.10.0/CHANGELOG.md
| 0.945714 | 0.77223 |
CHANGELOG.md
|
pypi
|
try:
import gensim
except ImportError:
print("Warning: Failed to import `gensim`, some functions may not be available")
import multiprocessing
import os
import pandas
from os import path
from pathlib import Path
from collections import OrderedDict
from copy import deepcopy
from .ioio import ioio
from .corpus import Corpus
from .scorology import Scorology
class Vectorology(Scorology, Corpus):
"""
Word embedding model training and scoring
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
os.makedirs(self.embeddings_path, exist_ok=True)
self.embeddings = OrderedDict()
self.shuffled_index = None
if self._to_load.get("models", []):
for model in self._to_load["models"]:
self.load_models_from_store(model)
if len(self.loaded["data"]) == len(
self.loaded["models"]
) == 1 and path.basename(self.loaded["models"][0]).startswith(
path.splitext(path.basename(self.loaded["data"][0]))[0] + "-"
):
index_name = self.loaded["models"][0] + ".index.pickle.xz"
self.shuffled_index = ioio.load(Path(index_name))
print("Index loaded: {}".format(index_name))
else:
print("Index not loaded")
models_path = property(lambda self: path.join(self.storage_dir, "models"))
def set_storage_dir(self, storage_dir):
storage_dir = path.normpath(storage_dir)
os.makedirs(self.embeddings_path, exist_ok=True)
super().set_storage_dir(storage_dir)
def clear_models(self):
self.embeddings = OrderedDict()
self.loaded["models"] = []
def load_models_from_store(self, fdir):
models = OrderedDict()
for fname in sorted(
os.listdir(fdir), key=lambda x: path.getctime(path.join(fdir, x))
):
if fname.isdigit():
fname = int(fname)
models[fname] = ioio.load(Path(fdir, str(fname)))
print("Models loaded: {}".format(path.join(fdir, str(fname))))
self.update_models(models, fdir)
def update_models(self, models, fullname):
if not self.embeddings.keys().isdisjoint(models):
print(
"Warning: models were overwritten upon loading: {}".format(
set(self.embeddings).intersection(models)
)
)
self.embeddings.update(models)
self.loaded["models"].append(path.normpath(fullname))
def load_models(
self,
name,
balance,
iterations,
window,
dimensions,
mode="document",
sg=False,
hs=True,
groupby=None,
load=True,
store=True,
localvocab=False,
):
if name is None:
if len(self.loaded["data"]) == 1:
name = path.splitext(path.basename(self.loaded["data"][0]))[0]
else:
raise Exception(
'Must provide "name" when more than one dataset is loaded'
)
else:
name = path.normpath(name)
name = (
"-".join(
[
name.replace("/", "++").replace(".", "+"),
str("sg" if sg else "cb"),
str("hs" if hs else "ns"),
str(mode),
str(iterations),
str(window),
str(dimensions),
str(balance),
str(localvocab),
str(groupby),
str(self.column),
]
)
+ ".vectors"
)
fullname = path.join(self.embeddings_path, name)
if load:
try:
return self.load_models_from_store(fullname)
except FileNotFoundError:
pass
if window == "full":
window = max(len(d) for d in list(self.itersentences("document")))
print("Window set to {}".format(window))
models = OrderedDict()
# Get data shuffled to reduce training bias
sdata = self.shuffled_data()
# Create the base model, hs=1 and negative=0 are required by .score()
basemodel = gensim.models.Word2Vec(
workers=multiprocessing.cpu_count(),
iter=iterations,
window=window,
size=dimensions,
sg=sg,
hs=hs,
negative=0 if hs else 5,
)
if not localvocab:
basemodel.build_vocab(self.itersentences(mode, sdata[self.column]))
# Train a model for each group of documents
grouped_data = sdata.groupby((lambda x: 0) if groupby is None else groupby)
print("Training these models:", list(grouped_data.groups))
for gname, gdata in self.balance_groups(grouped_data, balance):
print("\rTraining {:<42}".format(gname), end="")
models[gname] = deepcopy(basemodel)
trainlist = list(self.itersentences(mode, gdata[self.column]))
if localvocab:
models[gname].build_vocab(trainlist)
models[gname].train(
trainlist, total_examples=len(trainlist), epochs=models[gname].iter
)
if store:
ioio.store(models[gname], Path(fullname, f"{gname}.pickle.xz"))
print("\nModels stored: {}".format(gname))
if store:
ioio.store(
self.shuffled_index,
Path(self.embeddings_path, f"{name}.index.pickle.xz"),
)
print("Model training index stored: {}".format(fullname + ".index"))
self.update_models(models, fullname)
def calc_scores(self, mode="document", lenfix=True):
allscores = pandas.DataFrame()
print("Calculating scores for: {}".format(list(self.embeddings.keys())))
for name, model in self.embeddings.items():
print("\rCalculating {:<42}".format(name), end="")
# Get sentences, indexes and length of documents to correct likelihoods
sentencelist = list(self.itersentences(mode=mode))
indexlist = list(self.indexsentences(mode=mode))
lenabs = pandas.Series(
(
len([w for w in sentence if w in model.wv.vocab])
for sentence in self.itersentences(mode=mode)
),
name="lenabs",
)
assert len(sentencelist) == len(indexlist) == len(lenabs)
# the score (log likelihood) of each sentence for the model
scores = pandas.Series(model.score(sentencelist, len(sentencelist)))
if lenfix:
if model.sg:
w = model.window
sgfix = lenabs.apply(
lambda l: max(0, l - 2 * w) * 2 * w
+ min(l, 2 * w) * min(l - 1, w)
+ sum([int(i / 2) for i in range(min(l, 2 * w))])
)
scores = scores.div(sgfix)
else:
scores = scores.div(lenabs) # abstract-size correction
scorenans = scores[scores.isnull()]
if not scorenans.empty:
print("NaN found for model {}: {}".format(name, list(scorenans.index)))
allscores[name] = scores.groupby(indexlist).mean().loc[self.data.index]
print()
return allscores
def load_scores(self, mode="document"):
print("Loading scores for {}".format(self.column))
fname = f"scores-{self.column}.pickle.xz"
try:
self.scores = ioio.load(Path(self.analysis_dir, fname))
except FileNotFoundError:
self.scores = self.calc_scores(mode)
ioio.store(self.scores, Path(self.analysis_dir, fname))
def plot_wordpair_similarity_matrix(
self, words, name="", scale="linear", upper=False, diagonal=True
):
functions = OrderedDict(
(mname, getattr(model, "similarity"))
for mname, model in self.embeddings.items()
)
return self.plot_wordpair_matrix(
words,
functions,
funcname="similarity",
name=name,
scale=scale,
upper=upper,
diagonal=diagonal,
)
def plot_wordpair_similarity_profile(self, words, name=""):
functions = OrderedDict(
(mname, getattr(model, "similarity"))
for mname, model in self.embeddings.items()
)
return self.plot_wordpair_profile(
words, functions, funcname="similarity", name=name
)
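# --- Usage sketch (illustrative, not part of the original file) ---
# A Vectorology instance is normally created through the package's corpus
# setup; the constructor arguments below are assumptions, not documented API:
# >>> v = Vectorology(storage_dir="analysis")  # hypothetical construction
# >>> v.load_models(name=None, balance="nobalance", iterations=5,
# ...               window=5, dimensions=100, groupby="year")
# >>> scores = v.calc_scores(mode="document")  # one column per trained model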
|
/sashimi_domains-0.9.3-py3-none-any.whl/sashimi/vectorology.py
| 0.505127 | 0.175573 |
vectorology.py
|
pypi
|
from numpy import (
exp,
log,
dot,
zeros,
outer,
random,
dtype,
float32 as REAL,
double,
uint32,
seterr,
array,
uint8,
vstack,
fromstring,
sqrt,
newaxis,
ndarray,
empty,
sum as np_sum,
prod,
ones,
ascontiguousarray,
logaddexp,
)
from copy import deepcopy
def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
    Obtain likelihood score for a single sentence in a fitted CBOW representation.
    The sentence is a list of Vocab objects (or None, where the corresponding
    word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start : (pos + model.window + 1)], start)
word2_indices = [
word2.index
for pos2, word2 in window_pos
if (word2 is not None and pos2 != pos)
]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
return log_prob_sentence
def score_cbow_pair(model, word, word2_indices, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0) ** word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
def score_words_cbow(model, sentence):
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
log_prob_words = [None] * len(sentence)
index = [i for i, w in enumerate(sentence) if w in model.wv.vocab]
word_vocabs = [model.wv.vocab[sentence[i]] for i in index]
for pos, (word, idx) in enumerate(zip(word_vocabs, index)):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start : (pos + model.window + 1)], start)
word2_indices = [
word2.index
for pos2, word2 in window_pos
if (word2 is not None and pos2 != pos)
]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_words[idx] = score_cbow_pair(model, word, word2_indices, l1)
return log_prob_words
def score_sentence_sg(model, sentence, work=None):
"""
    Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(
word_vocabs[start : pos + model.window + 1], start
):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sg_pair(model, word, word2):
l1 = model.wv.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0) ** word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
def score_words_sg(model, sentence):
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
log_prob_words = [None] * len(sentence)
index = [i for i, w in enumerate(sentence) if w in model.wv.vocab]
word_vocabs = [model.wv.vocab[sentence[i]] for i in index]
for pos, (word, idx) in enumerate(zip(word_vocabs, index)):
log_prob_words[idx] = 0.0
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
for pos2, word2 in enumerate(
word_vocabs[start : pos + model.window + 1], start
):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_words[idx] += score_sg_pair(model, word, word2)
        # TODO: should we divide a word's contribution by the respective number of word2s?
return log_prob_words
def score_words(model, sentence):
if not model.hs:
raise RuntimeError(
"We have currently only implemented score \
for the hierarchical softmax scheme, so you need to have \
run word2vec with hs=1 and negative=0 for this to work."
)
if model.sg:
return score_words_sg(model, sentence)
else:
return score_words_cbow(model, sentence)
def score_sentence(model, sentence):
if not model.hs:
raise RuntimeError(
"We have currently only implemented score \
for the hierarchical softmax scheme, so you need to have \
run word2vec with hs=1 and negative=0 for this to work."
)
if model.sg:
return score_sentence_sg(model, sentence)
else:
return score_sentence_cbow(model, sentence, None)
def score_word_pair(model, w1, w2):
if model.hs:
word = model.wv.vocab[w1]
word2 = model.wv.vocab[w2]
l1 = model.wv.syn0[word2.index]
# l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0) ** word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
else:
word = model.wv.vocab[w1]
word2 = model.wv.vocab[w2]
l1 = model.wv.syn0[word2.index]
l2 = model.syn1neg[word.index]
lprob = -logaddexp(0, -dot(l1, l2))
return lprob
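# --- Usage sketch (illustrative, not part of the original file) ---
# These helpers assume an old-style gensim Word2Vec model trained with
# hierarchical softmax (hs=1, negative=0), as the runtime checks require:
# >>> score_sentence(model, ["some", "tokenized", "sentence"])  # sentence log-likelihood
# >>> score_words(model, ["some", "tokenized", "sentence"])     # per-word log-likelihoods
# >>> score_word_pair(model, "tokenized", "sentence")           # pairwise log-likelihood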
|
/sashimi_domains-0.9.3-py3-none-any.whl/sashimi/w2v_score.py
| 0.621771 | 0.490663 |
w2v_score.py
|
pypi
|
import numpy, os, pandas
import lxml.html as html, lxml.html.builder as build
from collections import OrderedDict
from matplotlib import pyplot as plt, colors as colors
from scipy.special import logsumexp
from .w2v_score import score_words, score_sentence, score_word_pair
from .misc import display_html_in_browser
class Scorology:
"""
Analysis based on document scores provided by a model.
Usually operate without modifying core data.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scores = pandas.DataFrame()
def clear_scores(self):
self.scores = pandas.DataFrame()
def get_mode(self, mode):
"""
Useful transformation and normalizations to apply to scores
"""
# fix() makes values proportional to total ensemble probability,
# allowing for more meaningful comparisons between models
nanlogsumexp = lambda x: logsumexp(x[x.notnull()])
fix = lambda x: x.sub(x.apply(nanlogsumexp))
if mode == "raw":
trans = lambda x: x
norm = lambda x: x
elif mode == "rank":
trans = lambda x: fix(x).rank()
norm = lambda x: x.div(x.max())
elif mode == "score":
trans = lambda x: fix(x)
norm = lambda x: x
elif mode == "prob":
trans = lambda x: fix(x).apply(numpy.exp)
norm = lambda x: x
return dict(name=mode, trans=trans, norm=norm)
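    # Usage sketch (illustrative, not part of the original file): a mode
    # bundles a transformation and a normalization to apply to self.scores:
    # >>> mode = self.get_mode("rank")
    # >>> ranked = mode["norm"](mode["trans"](self.scores))  # ranks rescaled to (0, 1]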
def get_scores(
self,
balance="nobalance",
balanceby=None,
noselfby=None,
normalize=True,
mode="score",
):
if self.scores.empty:
raise Exception("No scores loaded!")
scores = self.scores
if noselfby:
def removeself(x):
s = x.copy()
where = x.name == self.data[noselfby]
s[where] = numpy.nan
return s
scores = scores.apply(removeself)
if balance != "nobalance":
if balanceby:
locs = pandas.DataFrame().index
sdata = self.shuffled_data()
for group, gdata in self.balance_groups(
sdata.groupby(balanceby), balance
):
locs = locs.append(gdata.index)
scores = scores.loc[
locs
] # May duplicate index values, will clean below
else:
raise Exception('Balanced scoring requires "balanceby"')
scores = scores.loc[~scores.index.duplicated()] # Clean duplicates
print("get_scores(): len scores is {}".format(len(scores)))
mode = self.get_mode(mode)
scores = mode["norm"](mode["trans"](scores))
return scores
def get_scoremax(
self,
balance="nobalance",
balanceby=None,
noselfby=None,
style=None,
mode="score",
):
scores = self.get_scores(
balance=balance, balanceby=balanceby, noselfby=noselfby, mode=mode
)
cols = list(scores.columns)
cols = list(reversed(cols)) # For idxmax to break ties towards later groups
# numpy.random.shuffle(cols) # Check influence of idxmax taking 1st max idx found
scores = scores[cols]
scoremax = scores.idxmax(axis=1)
if style and style["name"] == "diff":
scoremax = scoremax - self.data.loc[scoremax.index, "year"]
if "limit" in style:
def limit_range(x, limit=style["limit"]):
if x < -limit:
return -limit
elif x > +limit:
return +limit
else:
return x
scoremax = scoremax.apply(limit_range)
print("Len scoremax is {}".format(len(scoremax)))
return scoremax
def get_scoreorder(
self, balance="nobalance", balanceby=None, noselfby=None, mode="score"
):
scores = self.get_scores(
balance=balance, balanceby=balanceby, noselfby=noselfby, mode=mode
)
scoresorted = scores.apply(
lambda x: x.sort_values(ascending=False).index, axis=1
)
return scoresorted
def plot_scores(
self,
groupby=None,
balance="nobalance",
noself=False,
yscale="linear",
mode="score",
):
yscale = {"value": yscale} if type(yscale) is str else yscale
scores = self.get_scores(
balance=balance,
balanceby=groupby,
noselfby=(groupby if noself else None),
mode=mode,
)
scores[groupby] = self.data[groupby]
# Each subplot is a model crossed with the corpus for each group
numfigrows = int(numpy.ceil(len(scores.columns) / 3))
axs = scores.boxplot(
by=groupby,
layout=(numfigrows, 3),
whis=[5, 95],
showmeans=1,
rot=90,
figsize=(17, 17),
)
for ax in axs:
for a in ax:
a.set_yscale(**yscale)
axs[0][0].get_figure().texts[0].set_text("Models by data")
plt.savefig(
os.path.join(
self.analysis_dir,
"{}-models_by_data-{}-{}.pdf".format(mode, balance, yscale["value"]),
)
)
plt.close()
del scores[groupby]
# Each subplot is the corpus for that group crossed with each of the models
grouped_data = self.data.groupby((lambda x: 0) if groupby is None else groupby)
plt.figure(figsize=(17, 17))
for i, (group, idxs) in enumerate(grouped_data.groups.items()):
plt.subplot(int(numpy.ceil(len(grouped_data) / 3)), 3, i + 1)
scores.loc[idxs]
ax = scores.loc[idxs].boxplot(whis=[5, 95], showmeans=1, return_type="axes")
ax.set_title(str(group))
ax.set_xticklabels([x.get_text()[-2:] for x in ax.get_xticklabels()])
ax.set_yscale(**yscale)
plt.tight_layout()
plt.suptitle("Data by models")
plt.savefig(
os.path.join(
self.analysis_dir,
"{}-data_by_models-{}-{}.pdf".format(mode, balance, yscale["value"]),
)
)
plt.close()
def plot_rankmax(
self,
groupby=None,
balance="nobalance",
noself=False,
scale="linear",
style=None,
mode="score",
):
rankmax = self.get_scoremax(
balance=balance,
balanceby=groupby,
noselfby=(groupby if noself else None),
style=style,
mode=mode,
)
maxbygroup = OrderedDict()
grouped_data = self.data.groupby((lambda x: 0) if groupby is None else groupby)
for group, idxs in grouped_data.groups.items():
grankmax = rankmax.loc[idxs]
maxbygroup[group] = grankmax.groupby(grankmax).count()
maxbygroup = pandas.DataFrame(maxbygroup).fillna(0).apply(lambda s: s / s.sum())
plt.figure(figsize=(10, 7))
norm = colors.LogNorm if scale == "log" else colors.Normalize
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0
        plt.pcolormesh(maxbygroup.to_numpy(), cmap="OrRd", norm=norm())
plt.yticks(numpy.arange(0.5, len(maxbygroup.index), 1), maxbygroup.index)
plt.xticks(
numpy.arange(0.5, len(maxbygroup.columns), 1),
maxbygroup.columns,
rotation=45,
)
plt.xlabel("data")
plt.ylabel("model")
plt.xlim(xmax=len(maxbygroup.columns))
plt.ylim(ymax=len(maxbygroup.index))
plt.colorbar()
plt.savefig(
os.path.join(
self.analysis_dir,
"{}max-{}-{}-{}-{}.pdf".format(
mode,
groupby if groupby else "nogroup",
balance,
"noself" if noself else "self",
scale,
),
)
)
plt.close()
def plot_rankmax_all(self, groupby=None):
for balance in ("randomsample", "randomfill", "antidemisample", "nobalance"):
for noself in (True, False):
for scale in ("log", "linear"):
self.plot_rankmax(groupby, balance, noself, scale)
def plot_profile(
self,
sample,
groupby=None,
balance="nobalance",
noself=None,
scale=None,
mode="score",
savefig=None,
):
scores = self.get_scores(
balance=balance,
balanceby=groupby,
noselfby=(groupby if noself else None),
mode=mode,
)
index = self.samplify(sample, scores)
for i in index:
print(i, "\n", self.data.loc[i], "\n\n")
plot_ext = dict(label=self.data.loc[i, groupby])
if noself:
marks = {
scores.columns[1]: [1],
scores.columns[-2]: [len(scores.columns) - 1],
}.setdefault(self.data.loc[i, groupby], [])
plot_ext.update(marker="o", markevery=marks)
plt.plot(scores.columns, scores.loc[i], **plot_ext)
if (
str(type(self)) == "<class 'sashimi.Vectorology.Vectorology'>"
and len(index) == 1
and False
):
plot_ext["label"] = "pp-{}".format(plot_ext["label"])
sentence = self.data.loc[i, self.column][0]
mode = self.get_mode(mode)
plt.plot(
list(self.embeddings),
mode["norm"](
mode["trans"](
pandas.DataFrame(
dict(
scores=[
(
score_sentence(self.embeddings[k], sentence)
/ len(sentence)
if not noself or k != self.data[groupby][i]
else numpy.nan
)
for k in self.embeddings
]
)
)
)
),
**plot_ext
)
if len(index) > 1:
plt.plot(
scores.columns,
scores.loc[index].mean(),
label="mean",
color="red",
linewidth=2,
)
if scale == "log":
plt.yscale("log")
# plt.ylim([0.0,0.0035])
plt.legend()
plt.xlabel("model")
if mode == "prob":
plt.ylabel("likelihood (e^score)")
elif mode == "score":
plt.ylabel("log-likelihood (score)")
if savefig:
plt.savefig(savefig, format="svg")
else:
plt.show()
input("Enter to close plot!")
plt.close()
def plot_wordpair_score_matrix(
self, words, name="", scale="linear", upper=False, diagonal=True
):
function = lambda m: lambda x, y: numpy.exp(score_word_pair(m, x, y))
functions = OrderedDict(
(mname, function(model)) for mname, model in self.embeddings.items()
)
return self.plot_wordpair_matrix(
words,
functions,
funcname="prob",
name=name,
scale=scale,
upper=upper,
diagonal=diagonal,
)
def plot_wordpair_score_profile(self, words, name=""):
function = lambda m: lambda x, y: numpy.exp(score_word_pair(m, x, y))
functions = OrderedDict(
(mname, function(model)) for mname, model in self.embeddings.items()
)
return self.plot_wordpair_profile(words, functions, funcname="prob", name=name)
def annotate_sentences(self, sample, measures=["variance", "mean", "str"]):
index = self.samplify(sample, self.data)
print("Index is {}".format(list(index)))
annotated = pandas.DataFrame(index=index, columns=measures, dtype=object)
sentences = self.data.loc[index, self.column].apply(lambda x: sum(x, ()))
for i in index:
sent_scores = pandas.DataFrame(
dict(
(name, score_words(model, sentences[i]))
for name, model in self.embeddings.items()
)
)
sent_scores.loc[:, self.data.year[i]] = numpy.nan
lenabs = len(
[
w
for w in sentences[i]
if w in list(self.embeddings.values())[0].wv.vocab
]
)
sent_probs = sent_scores.div(lenabs).apply(numpy.exp)
if "variance" in measures: # relative variance
annotated["variance"].set_value(
i, sent_probs.std(axis=1).div(sent_probs.mean(axis=1))
)
if "mean" in measures:
annotated["mean"].set_value(i, sent_probs.mean(axis=1))
if "str" in measures:
annotated["str"].set_value(i, sent_probs.apply(str, axis=1))
return {"sentences": sentences, "notes": annotated}
def publish_annotations(
self, sentences, notes, title="Annotated sentences", savename=None
):
from matplotlib import colors, cm, colorbar
import io
from base64 import b64encode
div0 = build.DIV()
if title:
div0.append(build.H1(title))
for i in sentences.index:
div1 = build.DIV(style="clear:both; padding:1em")
with io.BytesIO() as bimg:
self.plot_profile(
[i], groupby="year", noself=True, savefig=bimg, mode="prob"
)
img_p = build.IMG(
style="float:right;",
src="data:image/svg+xml;base64,"
+ b64encode(bimg.getvalue()).decode(),
)
trunc = lambda x: x if len(x) <= 200 else x[:200] + " ..."
d_id = self.data[self._labels["id"]][i]
d_title = self.data[self._labels["title"]][i]
d_venue = self.data[self._labels["venue"]][i]
d_authors = self.data[self._labels["authors"]][i]
d_authors = d_authors if type(d_authors) is tuple else [str(d_authors)]
d_authors = trunc(", ".join(d_authors))
d_affilia = self.data[self._labels["affiliations"]][i]
d_affilia = d_affilia if type(d_affilia) is tuple else [str(d_affilia)]
d_affilia = "({})".format(trunc("; ".join(d_affilia)))
h2 = build.H2(
build.A(
str(d_id),
href="https://www.ncbi.nlm.nih.gov/pubmed/{}".format(d_id),
style="text-decoration:none; color:#000099",
)
)
p_m = build.P(
build.EM(d_title),
build.BR(),
build.SPAN(d_authors),
build.BR(),
build.SPAN(d_affilia),
build.BR(),
build.EM(d_venue),
)
p_a = build.P()
m_color, m_size, m_title = "variance", "mean", "str"
note0, note1, note2 = (
notes.loc[i, m_color],
notes.loc[i, m_size],
notes.loc[i, m_title],
)
norm0 = colors.Normalize()
normed0 = norm0(note0[note0.notnull()])
cseq = pandas.Series(index=note0.index, dtype=object)
cseq.loc[note0.notnull()] = list(cm.coolwarm(normed0))
norm1 = colors.Normalize()
normed1 = norm1(note1[note1.notnull()])
for word, clr, fs, n0, n1, n2 in zip(
sentences[i], cseq, normed1, note0, note1, note2
):
if type(clr) is float and numpy.isnan(clr):
style = "font-size: small"
else:
style = "color: {};".format(colors.rgb2hex(clr))
style += "font-size: {}em;".format(1 + fs)
s = build.SPAN(
word, style=style, title="Var: {}\nMean: {}\n{}".format(n0, n1, n2)
)
s.tail = " "
p_a.append(s)
p_a[-1].tail = ""
with io.BytesIO() as bimg:
fig = plt.figure(figsize=(1, 6))
ax = fig.add_axes([0, 0.1, 0.3, 0.8])
cb = colorbar.ColorbarBase(
ax, cmap=cm.coolwarm, norm=norm0, orientation="vertical"
)
cb.set_label("word-level {}".format(m_color))
plt.savefig(bimg, format="svg")
img_c = build.IMG(
style="float:right; margin-left:1em",
src="data:image/svg+xml;base64,"
+ b64encode(bimg.getvalue()).decode(),
)
plt.close()
div1.extend((img_p, img_c, h2, p_m, p_a))
div0.append(div1)
if savename:
doc = build.HTML(build.HEAD(build.TITLE(title)))
doc.append(build.BODY(div0))
with open(os.path.join(self.analysis_dir, savename), "wb") as f:
f.write(html.tostring(doc))
else:
display_html_in_browser(div0, title=title)
|
/sashimi_domains-0.9.3-py3-none-any.whl/sashimi/scorology.py
| 0.757077 | 0.396185 |
scorology.py
|
pypi
|
from itertools import chain
from logging import getLogger
import re
import pandas as pd
log = getLogger(__name__)
try:
import spacy
from spacy.util import compile_infix_regex
except ImportError:
log.warning("Spacy could not be imported: tokenization will be unavailable.")
try:
from gensim.models import phrases
except ImportError:
log.warning("Gensim could not be imported: ngrams will be unavailable.")
def process_token_sources(token_sources, to_doc=None):
to_doc = tokens_to_doc if to_doc is None else to_doc
return token_sources.applymap(to_doc, na_action="ignore").agg(
lambda row: [sen for doc in row.dropna() for sen in doc], axis=1
)
def process_text_sources(text_sources, language=None, ngrams=3, stop_words="language"):
nlp = get_nlp(language or "en")
if (stop_words == "language") or (stop_words is True):
stop_words = nlp.Defaults.stop_words
elif not stop_words:
stop_words = {}
else:
stop_words = {*stop_words}
docs = text_to_tokens(text_sources, nlp)
translate_ngrams = make_translate_ngrams(docs, ngrams=ngrams)
docs = docs.map(translate_ngrams)
filter_tokens = make_filter_tokens(stop_words=stop_words)
docs = docs.map(filter_tokens)
return docs
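# --- Usage sketch (illustrative, not part of the original file) ---
# `text_sources` is a DataFrame of text columns; the result is a Series of
# documents, each a list of sentences, each a list of casefolded tokens:
# >>> import pandas as pd
# >>> sources = pd.DataFrame({"abstract": ["Hot-dogs are great. Really great."]})
# >>> docs = process_text_sources(sources, language="en", ngrams=1, stop_words=None)
# >>> docs.iloc[0]  # -> [["hot-dogs", "are", "great"], ["really", "great"]]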
def get_nlp(language_code):
"""
TODO: generalize correction to infixes
"""
nlp = spacy.blank(language_code)
nlp.add_pipe("sentencizer")
if language_code == "en":
infixes = nlp.Defaults.infixes.copy()
suffixes = nlp.Defaults.suffixes.copy()
# do not split on simple hyphen: hot-dog
old_part = r"(?:-|–|—|--|---|——|~)"
new_part = old_part.replace(r":-|", r":")
infixes[-2] = infixes[-2].replace(old_part, new_part)
# split on relation followed by numeral: value:23 number>3 x/y
spot = infixes[-1].rindex("[:<>=/](?=[")
infixes[-1] = infixes[-1][: spot + 11] + "0-9" + infixes[-1][spot + 11 :]
# split on suffix square bracket numeric citations: cite work[1,13]
old_part = r"\."
new_part = r"(?:\[[0-9][0-9,]*\])?" + old_part
suffixes[-1] = suffixes[-1].replace(old_part, new_part)
suffixes[-2] = suffixes[-2].replace(old_part, new_part)
# compile and replace
nlp.tokenizer.infix_finditer = compile_infix_regex(infixes).finditer
nlp.tokenizer.suffix_search = compile_infix_regex(suffixes).search
return nlp
def text_to_tokens(text_sources, nlp):
"""
Obs: fully NA rows will be empty documents.
"""
def text_to_doc(text):
return [[tok.text for tok in sent] for sent in nlp(text).sents]
def texts_to_doc(texts):
if isinstance(texts, str):
return text_to_doc(texts)
else:
return chain.from_iterable(text_to_doc(text) for text in texts)
return (
text_sources.applymap(texts_to_doc, na_action="ignore")
.agg(lambda row: [sen for doc in row.dropna() for sen in doc], axis=1)
.map(lambda doc: [[wor.casefold() for wor in sen] for sen in doc])
)
def make_translate_ngrams(docs, ngrams=3, threshold=0.9):
if ngrams not in range(1, 4):
raise ValueError("`ngrams` must be one of 1, 2, 3")
if ngrams == 1:
translate_ngrams = lambda x: x # noqa
phrases_args = {"scoring": "npmi", "threshold": threshold}
if ngrams > 1:
bigram = phrases.Phraser(
phrases.Phrases((s for d in docs for s in d), **phrases_args)
)
translate_ngrams = lambda doc_or_sen: [*bigram[doc_or_sen]] # noqa
if ngrams > 2:
trigram = phrases.Phraser(
phrases.Phrases(bigram[(s for d in docs for s in d)], **phrases_args)
)
translate_ngrams = lambda doc_or_sen: [*trigram[bigram[doc_or_sen]]] # noqa
return translate_ngrams
def make_filter_tokens(stop_words={}):
rx_alpha = re.compile(r"[^\W\d_]")
def word_filter(tok):
return (
(len(tok) > 1)
and (tok.casefold() not in stop_words)
and (rx_alpha.search(tok))
)
def filter_tokens(doc):
return [[tok for tok in sen if word_filter(tok)] for sen in doc]
return filter_tokens
def get_naive_tokenizer():
"""Do we send this to 'limbo/'?"""
from itertools import chain
re_sentence = re.compile(r"[\.!?][\s$]")
re_term = re.compile(r"[^\w@-]+")
re_alpha = re.compile(r"[^\W\d_]")
def to_sentences(doc):
if isinstance(doc, str):
return re_sentence.split(doc)
else:
return chain.from_iterable(re_sentence.split(doc_) for doc_ in doc)
def to_tokens(sentence):
return [
word
for inter in sentence.casefold().split()
for dirty in re_term.split(inter)
for word in [dirty.strip(".-_")]
if re_alpha.search(word)
]
def tokenize(doc):
return [
tokens
for sentence in to_sentences(doc)
for tokens in [to_tokens(sentence)]
if tokens
]
return tokenize
def strict_na(obj):
"""
pd.isna() applies element-wise to arrays and lists, but we want `False`.
"""
is_na = pd.isna(obj)
return is_na if isinstance(is_na, bool) else False
def flatten_nested_containers(obj, class_or_tuple=(list, tuple), dropna=False):
if isinstance(obj, class_or_tuple):
for ob in obj:
yield from flatten_nested_containers(ob, class_or_tuple, dropna)
elif not dropna or not strict_na(obj):
yield obj
def wordsentence(doc, in_class_or_tuple=(list, tuple), out_class=list):
"""
Convert element to Document[Sentence[Word...]...] form
"""
if strict_na(doc):
return doc
if isinstance(doc, in_class_or_tuple):
return out_class(
out_class(flatten_nested_containers(sen, in_class_or_tuple, True))
for sen in doc
if not strict_na(sen)
)
return out_class([out_class([doc])])
def tokens_to_doc(tokens):
return [[x] if isinstance(x, str) else [*x] for x in tokens]
|
/sashimi_domains-0.9.3-py3-none-any.whl/sashimi/corpus/nlp.py
| 0.42322 | 0.226848 |
nlp.py
|
pypi
|
import numpy as np
import pandas
def sorted_hierarchical_block_index(blocks, levels, level):
return sorted(
tuple(reversed(x))
for x in (
blocks[list(levels[levels.index(level) :])] # list() in case passed a tuple
.groupby(level)
.first()
.itertuples()
)
)
def make_normalization_factor(kind):
"""
Returns a function to get the normalization factor for a set of values
"""
if kind is None:
return lambda vals: 1
elif kind == "bylevel":
return lambda vals: vals.loc[np.isfinite(vals)].abs().sum() or 1
elif kind == "bylevelmax":
return lambda vals: vals.loc[np.isfinite(vals)].abs().max() or 1
else:
raise ValueError
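# Illustrative example (not part of the original file):
# >>> import pandas
# >>> vals = pandas.Series([1.0, -3.0, float("inf")])
# >>> make_normalization_factor("bylevelmax")(vals)  # largest finite magnitude
# 3.0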
def make_normalization_factor_js():
return """
function make_normalization_factor(kind) {
if (kind == "bylevelmax") {
return value => Math.max(...value.map(Math.abs).filter(Number.isFinite)) || 1
} else if (kind == "bylevel") {
const sum = (a, b) => a + b
return value => value.map(Math.abs).filter(Number.isFinite).reduce(sum, 0) || 1
} else {
return value => 1
}
}
"""
def try_datetime(series):
is_datetime = issubclass(series.dtype.type, (np.datetime64, pandas.Period))
if is_datetime:
return is_datetime, series
if series.dropna().map(lambda x: isinstance(x, str)).all():
try:
series = to_datetimeindex(series)
is_datetime = True
except Exception:
pass
if is_datetime:
return is_datetime, series
if not issubclass(series.dtype.type, np.number):
        # nested try: the original had two except clauses on one try, so a
        # failure inside the float fallback would propagate uncaught
        try:
            series = series.astype(int)
        except Exception:
            try:
                series = series.astype(float)
            except Exception:
                pass
if issubclass(series.dtype.type, np.number):
try:
            # plain comparisons: Series.min() returns a scalar without .ge/.le
            if series.min() >= 1678 and series.max() <= 2262:
series.loc[series.notna()] = series.dropna().astype(int).astype(str)
series = to_datetimeindex(series)
elif series.min() < 0 or series.max() > 999999:
series = to_datetimeindex(series, unit="s")
is_datetime = True
except Exception:
pass
return is_datetime, series
def to_datetimeindex(series, **kwargs):
return pandas.DatetimeIndex(pandas.to_datetime(series, dayfirst=True, **kwargs))
def try_period_get_range(series):
if issubclass(series.dtype.type, pandas.Period):
full_range = pandas.period_range(series.min(), series.max())
elif issubclass(series.dtype.type, np.datetime64) and (freq := get_freq(series)):
full_range = pandas.period_range(series.min(), series.max(), freq=freq)
series = series.to_period(freq=freq)
else:
full_range = series.drop_duplicates().sort_values()
return series, full_range
def get_freq(series):
valid = series.dropna()
return (
"A"
if (valid.is_year_start.all() or valid.is_year_end.all())
else "M"
if (valid.is_month_start.all() or valid.is_month_end.all())
else "D"
if valid.is_normalized
else False
)
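# --- Usage sketch (illustrative, not part of the original file) ---
# >>> import pandas
# >>> is_dt, parsed = try_datetime(pandas.Series(["01/01/2020", "01/01/2021"]))
# >>> is_dt  # dayfirst strings were parsed into a DatetimeIndex
# True
# >>> parsed, full_range = try_period_get_range(parsed)  # yearly PeriodIndex here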
|
/sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/util.py
| 0.674587 | 0.522446 |
util.py
|
pypi
|
import pandas
from collections import Counter
from tqdm import tqdm
from ..ioio import ioio
from ..naming import naming
"""
Methods to be used with hierarchical_block_map.
'zmethods' should be defined as:
def example(corpus, blocks, level, index)
and should return a pandas.Series of a scalar dtype and indexed by 'index'.
"""
def __init__():
return
def count(corpus, blocks, level, index):
count = blocks.groupby(blocks.loc[:, level]).size()
count = count.reindex(index)
count = count.where(count.notnull(), 0)
return count
def density(corpus, blocks, level, index):
count = blocks.groupby(blocks.loc[:, level]).size()
count = count.reindex(index)
count = count.where(count.notnull(), 0)
dens = count / count.sum()
return dens
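# Illustrative sketch (not part of the original module): a custom zmethod
# following the contract above, e.g. log-densities per block:
# def log_density(corpus, blocks, level, index):
#     import numpy as np
#     dens = density(corpus, blocks, level, index)
#     return np.log10(dens.where(dens > 0))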
def x_doc_density_gen(btype):
def x_doc_density(corpus, xblocks, xlevel, index):
x_documents = getattr(corpus, f"{btype}_documents")
x_groups = xblocks[xlevel].groupby(xblocks[xlevel])
count = pandas.Series(index=xblocks[xlevel].unique())
for n, g in tqdm(x_groups, desc=f"Level {xlevel}"):
s = set()
for x in g.index:
s.update(x_documents[x])
count.loc[n] = len(corpus.data.index.intersection(s))
count = count.reindex(index)
count = count.where(count.notnull(), 0)
value = count / len(corpus.data)
return value
x_doc_density.__name__ = f"{btype}_doc_density"
return x_doc_density
def x_link_density_gen(btype):
    def x_link_density(corpus, xblocks, xlevel, index):
x_documents = getattr(corpus, f"{btype}_documents")
x_groups = xblocks[xlevel].groupby(xblocks[xlevel])
count = pandas.Series(0, index=xblocks[xlevel].unique())
data_index = set(corpus.data.index)
for n, g in tqdm(x_groups, desc=f"{btype.capitalize()} density level {xlevel}"):
for x in g.index:
count.loc[n] += sum(
v for k, v in x_documents[x].items() if k in data_index
)
count = count.reindex(index)
count = count.where(count.notnull(), 0)
docs = corpus.get_doc_terms() if btype == "ter" else corpus.get_doc_exts()
value = count / docs.transform(len).sum()
return value
x_doc_density.__name__ = "{}_doc_density".format(btype)
return x_doc_density
# Auxiliary methods, used in zmethods or to generate them
def density_pair_gen(idx0, idx1, func):
def density_pair(corpus, blocks, level, index):
count0 = blocks.loc[idx0].groupby(blocks[level]).size()
count0 = count0.reindex(index)
count0 = count0.where(count0.notnull(), 0)
dens0 = count0 / count0.sum()
count1 = blocks.loc[idx1].groupby(blocks[level]).size()
count1 = count1.reindex(index)
count1 = count1.where(count1.notnull(), 0)
dens1 = count1 / count1.sum()
value = func(dens0, dens1)
return value.where(value.notnull(), 1) # 0/0 => 1
density_pair.__name__ = "density_{}_{}_{}".format(
func.__name__, idx0.name, idx1.name
)
return density_pair
def x_doc_density_pair_gen(idx0, idx1, func, btype):
def x_doc_density_pair(corpus, xblocks, xlevel, index):
x_documents = getattr(corpus, f"{btype}_documents")
x_groups = xblocks[xlevel].groupby(xblocks[xlevel])
count0 = pandas.Series(index=xblocks[xlevel].unique())
count1 = pandas.Series(index=xblocks[xlevel].unique())
index0 = corpus.data.index.intersection(idx0)
index1 = corpus.data.index.intersection(idx1)
for n, g in tqdm(x_groups):
s = set()
for x in g.index:
s.update(x_documents[x])
count0.loc[n] = len(index0.intersection(s))
count1.loc[n] = len(index1.intersection(s))
count0 = count0.reindex(index)
count0 = count0.where(count0.notnull(), 0)
count1 = count1.reindex(index)
count1 = count1.where(count1.notnull(), 0)
value = func(count0 / index0.size, count1 / index1.size)
return value.where(value.notnull(), 1)
x_doc_density_pair.__name__ = "{}_doc_density_{}_{}_{}".format(
btype, func.__name__, idx0.name, idx1.name
)
return x_doc_density_pair
def x_link_density_pair_gen(idx0, idx1, func, btype):
def x_link_density_pair(corpus, xblocks, xlevel, index):
x_documents = getattr(corpus, f"{btype}_documents")
x_groups = xblocks[xlevel].groupby(xblocks[xlevel])
# initialize with 0 so per-element link counts can accumulate below
count0 = pandas.Series(0, index=xblocks[xlevel].unique())
count1 = pandas.Series(0, index=xblocks[xlevel].unique())
index0 = set(corpus.data.index.intersection(idx0))
index1 = set(corpus.data.index.intersection(idx1))
for n, g in tqdm(x_groups):
for x in g.index:
count0.loc[n] += sum(v for k, v in x_documents[x].items() if k in index0)
count1.loc[n] += sum(v for k, v in x_documents[x].items() if k in index1)
count0 = count0.reindex(index)
count0 = count0.where(count0.notnull(), 0)
count1 = count1.reindex(index)
count1 = count1.where(count1.notnull(), 0)
docs = corpus.get_doc_terms() if btype == "ter" else corpus.get_doc_exts()
value = func(
count0 / docs.loc[docs.index.intersection(idx0)].transform(len).sum(),
count1 / docs.loc[docs.index.intersection(idx1)].transform(len).sum(),
)
return value.where(value.notnull(), 1)
x_link_density_pair.__name__ = "{}_link_density_{}_{}_{}".format(
btype, func.__name__, idx0.name, idx1.name
)
return x_link_density_pair
def get_cross_counts(corpus, ybtype, ltype):
"""
Pairs every domain from every level with every cross block from every
level (topics or extended blocks) and counts the number of links or documents
connected to each cross block.
(corpus)
(ybtype): str
The block type to cross. Either 'ter' or 'ext'.
(ltype) str
Either 'link' or 'doc'. Whether to count links or documents.
Result
------
A pandas.Series with MultiIndex:
(domain level, domain, cross level, cross block)
"""
use_cache = corpus.use_cached_cross_counts
dblocks = corpus.dblocks
yblocks, yblocks_levels, _ = corpus.get_blocks_levels_sample(ybtype)
y_documents = corpus.get_xelement_yelements(ybtype, "doc")
fname_params = [("ybtype", ybtype), ("ltype", ltype)]
if sample_hash := corpus.get_sample_hash(
**{x: x in {"doc", ybtype} for x in ["doc", "ter", "ext"]}
):
fname_params = [("sample", sample_hash), *fname_params]
if ybtype == "ter":
fdir = corpus.blocks_dir
elif ybtype == "ext":
fdir = corpus.chained_dir
fpath = fdir / "cache" / naming.gen("cross_counts", fname_params, corpus.ext_data)
if use_cache:
try:
values = ioio.load(fpath)
if isinstance(values, dict):
values = pandas.Series(
index=pandas.MultiIndex.from_tuples(values["index"]),
data=(x for x in values["data"]),
)
print("Loaded cached cross counts")
return values
except FileNotFoundError:
pass
keys, vals = [], []
for ylevel in tqdm(yblocks_levels, desc="Cross level"):
y_groups = yblocks[ylevel].groupby(yblocks[ylevel])
for yb, yg in tqdm(y_groups, desc=f" {ybtype.capitalize()} block"):
yb_docs = Counter() if ltype == "link" else set()
for ye in yg.index:
yb_docs.update(y_documents[ye])
for level in tqdm(corpus.dblocks_levels, desc=" Doc level"):
doc_groups = dblocks[level].groupby(dblocks[level])
for b, g in doc_groups:
keys.append((level, b, ylevel, yb))
if ltype == "link":
g_index = set(g.index)
vals.append(sum(v for k, v in yb_docs.items() if k in g_index))
else: # ltype == 'doc'
vals.append(len(g.index.intersection(yb_docs)))
values = pandas.Series(vals, index=pandas.MultiIndex.from_tuples(keys))
if use_cache:
print("Storing cross counts")
ioio.store_pandas(values, fpath)
return values
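# Comment sketch of the result: values.loc[(dlevel, db, ylevel, yb)] holds, for
# hypothetical block ids db and yb, either the number of links (ltype="link")
# or the number of distinct documents (ltype="doc") shared between document
# block db at dlevel and cross block yb at ylevel.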
def p_diff(a, b):
return a - b
def p_rel(a, b):
return a / b
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/zmethods.py ====
from collections import Counter
from functools import cache
import pandas as pd
from ..corpus import Corpus
from .hierarchical_block_map import composite_hierarchical_block_map
from . import zmethods
from .tables import subxblocks_report, xblocks_report
from .network_map import network_map
class Blocks:
"""
Methods to produce visualisations and analysis from an existing blockstate.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.col_title is None:
print("Warning: `col_title` is not set")
def get_blocks_levels(self, btype=None):
"""
Parameters
----------
btype: the type of block to return ('doc', 'ter' or 'ext')
Returns
-------
blocks: the original blocks
levels: the levels
"""
if btype is None:
return {
"doc": (self.dblocks, self.dblocks_levels),
"ter": (self.tblocks, self.tblocks_levels),
"ext": (self.eblocks, self.eblocks_levels),
}
elif btype == "doc":
return self.dblocks, self.dblocks_levels
elif btype == "ter":
return self.tblocks, self.tblocks_levels
elif btype == "ext":
return self.eblocks, self.eblocks_levels
else:
raise ValueError("Unrecognized `btype`.")
def get_blocks_levels_sample(self, btype):
"""
If data is sampled, we need to sample the corresponding entries
from the blocks as well.
Parameters
----------
btype: the type of block to return ('doc', 'ter' or 'ext')
Returns
-------
blocks: the original blocks
levels: the levels
sblocks: the blocks restricted to the data sample
"""
blocks, levels = self.get_blocks_levels(btype)
if btype == "doc":
sblocks = self.dblocks.loc[self.data.index]
elif btype == "ter":
sblocks = self.tblocks.loc[
self.tblocks.index.intersection(self.get_vocab())
]
elif btype == "ext":
sblocks = blocks # TODO actually sample eblocks
return blocks, levels, sblocks
def hblock_to_level_block(self, hb, btype):
_, levels = self.get_blocks_levels(btype)
level = len(levels) - len(hb) + 1
return ("v" if level == 0 else level), hb[-1]
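# Comment example: with dblocks_levels == [1, 2, 3], the hierarchical index
# (b3, b2, b1) has length 3, so level = 3 - 3 + 1 = 1 and the block is b1;
# a length-4 hblock reaches below level 1 and maps to the vertex level "v".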
def level_block_to_hblock(self, level, block, btype):
blocks, levels = self.get_blocks_levels(btype)
if level == "v":
levels = levels.copy()
levels.insert(0, "v")
b = blocks.loc[blocks[level].eq(block), levels[levels.index(level) :]]
return tuple(reversed(b.iloc[0].tolist()))
def domain_labels_to_selection(self, labels):
level_blocks = {}
for label in labels:
for level, block in [
self.hblock_to_level_block(self.label_to_hblock[label], "doc")
]:
level_blocks.setdefault(level, []).append(block)
sel = pd.Series(False, self.data.index)
for level, blocks in level_blocks.items():
sel |= self.dblocks[level].isin(blocks)
return sel
def set_sample(self, sample, keep=False):
Corpus.set_sample(self, sample=sample, keep=keep)
if hasattr(self, "dblocks"):
if not hasattr(self, "_orig_dblocks"):
self._orig_dblocks = self.dblocks
if keep:
self.dblocks = self.dblocks.loc[self.data.index]
else:
self.dblocks = self._orig_dblocks.loc[self.data.index]
self.gen_mapindex()
def find_blocks(self, lscape, finder):
"""
Returns the blocks that correspond to features defined in finder.
For example, blocks with the highest or lowest values, or with
the highest or lowest differences in values across the landscape.
Parameters
----------
lscape: dict of pandas.Series or pandas.DataFrames
The landscape of values for each level over its blocks.
finder: str or function
Finds the desired blocks in the landscape.
Returns
-------
found: dict
Dictionary containing whatever `finder` looked for.
"""
if callable(finder):
return finder(self, lscape)
found = dict()
if finder == "level_max_min_absmin":
for level, ls_l in lscape.items():
if isinstance(ls_l, pd.Series):
found[level] = dict(
max=ls_l.idxmax(), min=ls_l.idxmin(), absmin=ls_l.abs().idxmin()
)
elif isinstance(ls_l, pd.DataFrame):
idxmax = ls_l.max().idxmax()
idxmin = ls_l.min().idxmin()
idxabsmin = ls_l.abs().min().idxmin()
found[level] = dict(
max=(ls_l.idxmax()[idxmax], idxmax),
min=(ls_l.idxmin()[idxmin], idxmin),
absmin=(ls_l.abs().idxmin()[idxabsmin], idxabsmin),
)
else:
raise ValueError("Unrecognized type in values of `lscape`.")
return found
def domain_map(self, title=None, diff_idxs=None, chained=False, **kwargs):
idx_all = self.data.index.copy()
idx_all.name = "all"
btype = "ext" if chained else "ter"
if not diff_idxs:
kwargs_ = dict(
norm=["bylevelmax", "bylevelmax"],
scale=["linear", "linear"],
bheight=["proval", "hierarchical"],
)
kwargs_.update(kwargs)
return composite_hierarchical_block_map(
self,
["doc", btype],
zmethod=[
zmethods.density,
zmethods.x_link_density_gen(btype),
],
link_p_func=zmethods.p_rel,
page_title=title,
**kwargs_,
)
else:
idx0, idx1 = diff_idxs
kwargs_ = dict(
norm=["bylevelmax", "bylevelmax"],
scale=["linear", "log"],
bheight=["proval", "hierarchical"],
)
kwargs_.update(kwargs)
return composite_hierarchical_block_map(
self,
["doc", btype],
zmethod=[
zmethods.density_pair_gen(idx0, idx1, zmethods.p_diff),
zmethods.x_link_density_pair_gen(idx0, idx1, zmethods.p_rel, btype),
],
link_p_func=zmethods.p_rel,
page_title=title,
**kwargs_,
)
def subxblocks_tables(self, xbtype, xlevel, xb, ybtype, ylevel=1):
outpaths = []
outdir = self.blocks_adir if ybtype == "ter" else self.chained_adir
if xb is None:
xblocks, _ = self.get_blocks_levels(xbtype)
xbtargets = xblocks[xlevel].unique()
elif isinstance(xb, list):
xbtargets = xb
else:
xbtargets = [xb]
for xbt in xbtargets:
if sample_hash := self.get_sample_hash(doc=True, ter=True, ext=True):
sample_hash = f"-sample:{sample_hash}"
outfile = f"table{sample_hash}-{self.lblock_to_label[xlevel, xbt]}"
outfile += f"-L{ylevel}{ybtype[0].upper()}.html"
fname = subxblocks_report(
self,
xbtype,
xlevel,
xbt,
ybtype,
ylevel,
outfile=outdir / outfile,
)
outpaths.append(fname)
return outpaths if isinstance(xb, list) or xb is None else outpaths.pop()
def xblocks_tables(self, *args, **kwargs):
return xblocks_report(self, *args, **kwargs)
def domain_network(self, *args, **kwargs):
return network_map(self, "doc", *args, **kwargs)
def network_map(self, *args, **kwargs):
return network_map(self, *args, **kwargs)
def filter_topic_terms_from_corpus(self, tlevel, tblock, column=None):
"""
Removes all terms belonging to a topic from the corpus.
"""
topic_terms = set(self.tblocks[self.tblocks[tlevel].eq(tblock)].index)
return self.filter_terms(lambda term: term not in topic_terms, column)
@cache
def get_xelement_yelements(self, xbtype, ybtype):
if (xbtype, ybtype) == ("doc", "ter"):
series = pd.Series(self.get_doc_terms(), index=self.dblocks.index)
if (xbtype, ybtype) == ("doc", "ext"):
series = pd.Series(self.get_doc_exts(), index=self.dblocks.index)
if (xbtype, ybtype) == ("ter", "doc"):
series = pd.Series(self.ter_documents, index=self.tblocks.index)
if (xbtype, ybtype) == ("ext", "doc"):
series = pd.Series(self.ext_documents, index=self.eblocks.index)
return series.map(
lambda x: x
if isinstance(x, Counter)
else Counter()
if (isinstance(isna := pd.isna(x), bool) and isna)
else Counter(x)
)
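# A note on the mapping above: every entry is coerced to a Counter so callers
# can rely on `.elements()` uniformly, whether the stored value was already a
# Counter, a plain iterable of ids, or missing (NaN becomes an empty Counter).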
def get_xsel_yblocks_counts(self, xbtype, xsel, ybtype, ylevel):
xelement_yelements = self.get_xelement_yelements(xbtype, ybtype)
xblocks, _ = self.get_blocks_levels(xbtype)
yblocks, _ = self.get_blocks_levels(ybtype)
yelement2yblock = (lambda x: x) if ylevel is None else yblocks[ylevel].get
xblock_yblocks_c = Counter()
for val in xelement_yelements.loc[xsel]:
xblock_yblocks_c.update(yelement2yblock(el) for el in val.elements())
return xblock_yblocks_c
@cache
def get_xblock_yblocks_counts(self, xbtype, xlevel, xb, ybtype, ylevel):
xblocks, _ = self.get_blocks_levels(xbtype)
xsel = (xblocks.index == xb) if xlevel is None else xblocks[xlevel].eq(xb)
return self.get_xsel_yblocks_counts(xbtype, xsel, ybtype, ylevel)
def get_antixblock_sel(self, xbtype, xlevel, xb, antixlevel=None):
"""
Selects the complement of a block `xb` of level `xlevel` and type `xbtype`.
(antixlevel): restrict selection to this parent (higher) xlevel
"""
xblocks, _ = self.get_blocks_levels(xbtype)
xsel = (xblocks.index == xb) if xlevel is None else xblocks[xlevel].eq(xb)
xsel = ~xsel
if antixlevel is not None:
if antixlevel <= xlevel:
raise ValueError("`antixlevel` must be higher than `xlevel`")
antixlevel_xblock_containing_xb = xblocks.loc[~xsel, antixlevel].iloc[0]
antixsel = xblocks[antixlevel].eq(antixlevel_xblock_containing_xb)
xsel = xsel & antixsel
return xsel
@cache
def get_antixblock_yblocks_counts(
self, xbtype, xlevel, xb, antixlevel=None, ybtype="ter", ylevel=1,
):
xsel = self.get_antixblock_sel(xbtype, xlevel, xb, antixlevel)
return self.get_xsel_yblocks_counts(xbtype, xsel, ybtype, ylevel)
def get_xblock_yblocks_stat(
self, stat, xbtype, xlevel, xb, ybtype, ylevel, ybs=None
):
"""
Gets the `stat` = "count" or "frac_presence" of yblocks in xblocks.
"""
ybs = None if ybs is None else set(ybs)
if stat not in ("count", "frac_presence"):
raise ValueError(f"Unkown statistics: {stat}")
xelements_yelements = self.get_xelement_yelements(xbtype, ybtype)
xblocks, _ = self.get_blocks_levels(xbtype)
yblocks, _ = self.get_blocks_levels(ybtype)
# functions to abstract different cases
yelement2yblock = (lambda x: x) if ylevel is None else yblocks[ylevel].get
def yelements2yblocks(xel_yels):
return (yelement2yblock(el) for el in xel_yels.elements())
yelements2yblocks4stat = (
yelements2yblocks
if (stat == "count")
else (lambda xel_yels: set(yelements2yblocks(xel_yels)))
)
any_cum_func = len if stat == "count" else bool
# calculations
xblock_yblocks_c = Counter()
xblock_yblocks_any = 0
sel = (xblocks.index == xb) if xlevel is None else xblocks[xlevel].eq(xb)
for xel_yels in xelements_yelements.loc[sel]:
yblocks4stat = [*yelements2yblocks4stat(xel_yels)]
xblock_yblocks_c.update(yblocks4stat)
xblock_yblocks_any += any_cum_func(
[yb for yb in yblocks4stat if (ybs is None) or (yb in ybs)]
)
stat_s = pd.Series(xblock_yblocks_c).sort_values()
stat_s.name = self.lblock_to_label[xlevel, xb]
stat_s["any"] = xblock_yblocks_any
stat_s = stat_s.div(sel.sum()) if stat == "frac_presence" else stat_s
return stat_s
def get_dblock_xcount(self, b, btype):
dlevel, db = self.hblock_to_level_block(b, "doc")
xc = self.get_xblock_yblocks_counts("doc", dlevel, db, btype, None)
xct = sum(xc.values())
return xc, xct
def get_dblock_xblock_count(self, b, btype):
dlevel, db = self.hblock_to_level_block(b, "doc")
xc = self.get_xblock_yblocks_counts("doc", dlevel, db, btype, 1)
xct = sum(xc.values())
return xc, xct
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/__init__.py ====
from functools import cache, partial
import numpy as np
from tqdm import tqdm
from bokeh.models.callbacks import CustomJS
from .util import sorted_hierarchical_block_index, make_normalization_factor_js
def link_maps(
corpus,
source_btype,
source_fig,
source_blocks,
source_levels,
target_btype,
target_fig,
target_blocks,
target_levels,
values,
selection_mode,
pfunc=None,
norm="bylevelmax",
):
"""
Once linked, clicking on a block in the source map produces a change on the target map's z-axis.
Currently this implements an x_{doc,link}_density_pair calculation with pfunc against all.
Tested with norm='bylevelmax' and pfunc=p_rel (scale='log').
Parameters
==========
values:
A `pandas.Series` of scalar values with `MultiIndex` like
(source level, source block, target level, target block)
"""
# flip around `values` if docs are target, and make sure index is sorted
if target_btype == "doc":
values = values.reorder_levels([2, 3, 0, 1])
values = values.sort_index()
# size calculating functions
if "ter" in (source_btype, target_btype):
doc_xs_sizes = corpus.get_doc_terms().transform(len)
elif "ext" in (source_btype, target_btype):
doc_xs_sizes = corpus.get_doc_exts().transform(len)
def get_doc_source_links_sizes(source_level, source_b):
return doc_xs_sizes.loc[source_blocks[source_level].eq(source_b)].sum()
@cache
def get_x_source_links_sizes(target_level):
return target_blocks.groupby(target_level)[target_level].agg(
lambda x: doc_xs_sizes.loc[x.index.intersection(doc_xs_sizes.index)].sum()
)
def get_doc_source_doc_sizes(source_level, source_b, target_level):
return source_blocks[source_level].eq(source_b).sum()
def get_x_source_doc_sizes(source_level, source_b, target_level):
return target_blocks.groupby(target_level).size()
# TODO param to choose between link and doc
get_doc_source_sizes, get_x_source_sizes = (
get_doc_source_links_sizes,
get_x_source_links_sizes,
)
# apply pfunc between local values and total (top of hierarchy) values
def apply_pfunc(vals):
if pfunc is not None:
if source_btype == "doc":
((b_top, g_top),) = values[(max(source_levels),)].groupby(level=0)
vals_all = g_top[(b_top, target_level)]
sizes = get_doc_source_sizes(max(source_levels), b_top)
return pfunc(vals, vals_all / sizes)
elif target_btype == "doc":
(val_all,) = values[(source_level, source_hb[-1], max(target_levels))]
(size,) = get_x_source_sizes(max(target_levels))
return pfunc(vals, val_all / size)
else:
return vals
get_target_hbindex = cache(partial(sorted_hierarchical_block_index, target_blocks))
fix_nans = make_fix_nans(getattr(pfunc, "__name__", None))
st_val = [] # st_val[source_map_index][target_map_index]: values
total_blocks = sum(source_blocks.groupby(level).ngroups for level in source_levels)
prog = tqdm(desc=f"Linking {source_btype} to {target_btype}", total=total_blocks)
for source_level in list(reversed(source_levels)):
source_hbindex = sorted_hierarchical_block_index(
source_blocks, source_levels, source_level
)
for source_hb in source_hbindex:
st_val.append([])
for target_level in reversed(target_levels):
target_hbindex = get_target_hbindex(tuple(target_levels), target_level)
target_bindex = [thb[-1] for thb in target_hbindex]
vals = values.loc[(source_level, source_hb[-1], target_level)]
# fraction of domain
sizes = (
get_doc_source_sizes(source_level, source_hb[-1])
if source_btype == "doc"
else get_x_source_sizes(target_level)
)
vals = vals / sizes
# pfunc with baseline
vals = apply_pfunc(vals)
# store non normalized data (after fixing possible? nans)
st_val[-1].extend(vals.map(fix_nans).loc[target_bindex].to_list())
prog.update()
prog.close()
level_bounds = (
target_blocks[list(reversed(target_levels))]
.agg(lambda s: len(s.unique()))
.cumsum()
.to_list()
)
target_datasource = target_fig.select_one("{}_map_data".format(target_btype))
source_datasource = source_fig.select_one("{}_map_data".format(source_btype))
link_cb = CustomJS(
args=dict(
selected=source_datasource.selected,
target_datasource=target_datasource,
st_value=st_val,
level_bounds=level_bounds,
norm=norm,
pfunc_kind=getattr(pfunc, "__name__", None),
selection_mode=selection_mode,
),
code=(link_maps_js()),
)
source_datasource.selected.js_on_change("change:indices", link_cb)
selection_mode.js_on_change("value", link_cb)
def link_maps_js():
return (
make_fix_nans_js()
+ make_normalization_factor_js()
+ """
const num_sources = selected.indices.length
const new_target_data = { ...target_datasource.data }
const combine = function(sm) {
if (sm == "single") { return x => x[0] }
if (sm == "multi AND") { return x => Math.min(...x) }
if (sm == "multi OR") { return x => Math.max(...x) }
}(selection_mode.value)
const repr = function(x) {
if ((0 < x) && (x < 1)) { return '(' + (1/x).toPrecision(2) + ')' }
else { return x.toPrecision(2)}
}
const normalization_factor = make_normalization_factor(norm)
const fix_nans = make_fix_nans(pfunc_kind)
if (num_sources == 0) { // simple volume map
new_target_data.z = [...new_target_data.o_z]
new_target_data.value = [...new_target_data.o_value]
new_target_data.text = new_target_data.o_value.map(x => x.toPrecision(2))
}
else if (num_sources > 0) { // single or multiple source relative map
const value = [...st_value[selected.indices[0]]]
if (num_sources > 1) {
for (let i = 0; i < value.length; i++) {
const values_i = [value[i]]
for (const index of selected.indices.slice(1)) {
values_i.push(st_value[index][i])
}
value[i] = combine(values_i)
}
}
let z
if (pfunc_kind == "p_rel") {
z = value.map(Math.log)
} else {
z = [...value]
}
let prev_bound = 0
for (const bound of level_bounds) {
const norm_factor = normalization_factor(z.slice(prev_bound, bound))
z.splice(
prev_bound,
bound - prev_bound,
...z.slice(prev_bound, bound).map(x => x / norm_factor).map(fix_nans),
)
prev_bound = bound
}
new_target_data.value = value
new_target_data.z = z
new_target_data.text = value.map(repr)
}
target_datasource.data = new_target_data
"""
)
def make_fix_nans(kind):
"""
from Python we're fixing for 'value'
that means BEFORE log and normalization
"""
if kind is None:
return lambda x: x
elif kind in ("p_rel", "p_diff"):
nan_value = int(kind == "p_rel")
return lambda x: nan_value if np.isnan(x) else x
else:
raise ValueError
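# Comment examples of the Python-side fixer:
# make_fix_nans("p_rel")(float("nan"))   # -> 1, a 0/0 ratio reads as "no change"
# make_fix_nans("p_diff")(float("nan"))  # -> 0, a 0-0 difference likewise
# make_fix_nans(None)(0.5)               # -> 0.5, identity when no pfunc is set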
def make_fix_nans_js():
"""
from Javascript we're fixing for 'z' (color)
that means AFTER log and normalization
"""
return """
function make_fix_nans(kind) {
if (kind == "p_rel" || kind == "p_diff") return x => Number.isNaN(x) ? 0 : x
else return x => x
}
"""
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/link_maps.py ====
from itertools import islice
import re
import graph_tool.all as gt
import numpy as np
from ..naming import naming
from .annotations import (
get_xblock_yblocks_elements,
get_subxblocks_yblocks_elements,
load_annotations,
)
from ..misc import get_hash
def dblock_terext_graph(
corpus,
xbtype,
doc_level,
ter_level,
ext_level,
*,
xb_selection=None,
edges="specific",
):
def first_element(yb_info_elements):
return yb_info_elements[0][0] if yb_info_elements else ""
xlevel = {"doc": doc_level, "ter": ter_level, "ext": ext_level}[xbtype]
if edges == "specific":
annotation_function = get_xblock_yblocks_elements
elif edges == "common":
if xlevel == 1:
raise ValueError(
f"`edges=common` requires xbtype (here `{xbtype}`) level > 1 (here `{xlevel}`)."
)
annotation_function = get_subxblocks_yblocks_elements
else:
raise ValueError("`edges` must be one of ['specific', 'common']")
annotations_ylevel = []
xb_selection = set() if xb_selection is None else set(xb_selection)
if xbtype == "doc":
if ter_level is not None:
ter_annotations = load_annotations(
corpus, annotation_function, "doc", "ter", ter_level
)
annotations_ylevel.append((ter_annotations, ter_level))
if ext_level is not None:
ext_annotations = load_annotations(
corpus, annotation_function, "doc", "ext", ext_level
)
annotations_ylevel.append((ext_annotations, ext_level))
else:
doc_annotations = load_annotations(
corpus, annotation_function, xbtype, "doc", doc_level
)
annotations_ylevel.append((doc_annotations, doc_level))
g = gt.Graph()
g.ep["specificity"] = g.new_edge_property("float")
g.ep["label"] = g.new_edge_property("string")
vp_label = g.vp["label"] = g.add_edge_list(
(
(
corpus.lblock_to_label[xlevel, xb],
corpus.lblock_to_label[ylevel, yb],
yb_info["ent"],
first_element(yb_info["elements"]),
)
for annotations, ylevel in annotations_ylevel
for xb in annotations[xlevel]
for yb, yb_info in islice(
annotations[xlevel][xb]["blocks"].items(), 10
) # only 1st 10
if not xb_selection or xb in xb_selection
),
hashed=True,
eprops=(g.ep["specificity"], g.ep["label"]),
)
vp_labels = {*vp_label}
for xb in xb_selection:
xb_label = corpus.lblock_to_label[xlevel, xb]
if xb_label not in vp_labels:
vp_label[g.add_vertex()] = xb_label
vp_type = g.vp["type"] = g.new_vertex_property("string")
for v in g.vertices():
vp_type[v] = re.search(r"[DTE]", vp_label[v])[0]
return g
def network_map(
corpus,
xbtype,
doc_level,
ter_level=1,
ext_level=None,
*,
xb_selection=None,
split_on=None,
split_list=None,
edges="specific",
):
"""
(corpus)
(xbtype)
(doc_level) display document nodes from this level
(ter_level) display term nodes from this level
(ext_level) display chained nodes from this level
(xb_selection) list of int
Restrict the network to a list of domains from `doc_level`
Show a halo on domain nodes for the difference in relative volume
between those in and out of a set of values such as years:
(split_on) series of values based on which to split
(split_list) values from split series to consider positive
(edges): measure determining edges ("specific" or "common")
"""
xblocks, xblocks_levels = corpus.get_blocks_levels(xbtype)
xlevel = {"doc": doc_level, "ter": ter_level, "ext": ext_level}[xbtype]
arg_xb_selection = xb_selection
if xb_selection is None:
xb_selection = xblocks[xlevel]
xb_selection = np.unique(xb_selection)
g = dblock_terext_graph(
corpus,
xbtype,
doc_level,
ter_level,
ext_level,
xb_selection=xb_selection,
edges=edges,
)
vp_label = g.vp["label"]
vp_block = g.new_vertex_property("int")
for v in g.vertices():
vp_block[v] = corpus.label_to_hblock[vp_label[v]][-1]
vp_type = g.vp["type"]
g.vp["shape"] = g.new_vertex_property(
"string",
[
"square" if t == "D" else "circle" if t == "T" else "hexagon"
for t in vp_type
],
)
g.vp["weight"] = g.new_vertex_property(
"float",
(
3 if vp_type[v] == "D" else 0 if vp_type[v] == "T" else 1
for v in g.vertices()
),
)
g.ep["weight"] = gt.prop_to_size(g.ep["specificity"], mi=1)
# Some data about the corpus to set color and size
size = (
corpus.dblocks[doc_level]
.value_counts()
.loc[xb_selection if xbtype == "doc" else slice(None)]
.map(area2radius)
.pipe(lambda x: x.div(x.max()))
)
if split_on is not None:
period_color_scalar = get_split_fraction_diff_rel(
corpus, xlevel, xb_selection, split_on, split_list
)
vp_size = g.vp["size"] = g.new_vertex_property("float")
vp_color = g.vp["color"] = g.new_vertex_property("vector<float>")
vp_fill_color = g.vp["fill_color"] = g.new_vertex_property("vector<float>")
for v in g.vertices():
vp_size[v] = 100 if vp_type[v] == "D" else 80
vp_fill_color[v] = [
1,
0,
0,
0.85 * size[vp_block[v]] if vp_type[v] == "D" else 0,
]
if vp_type[v] == "D" and split_on is not None:
color = period_color_scalar[vp_block[v]]
vp_color[v] = [0, color > 0, color < 0, np.abs(color)]
elif vp_type[v] == "E":
vp_color[v] = [0, 0, 0, 1]
else:
vp_color[v] = [0, 0, 0, 0]
g.vp["position"] = gt.sfdp_layout(
g, eweight=gt.prop_to_size(g.ep["specificity"], mi=0.01, power=1)
)
fname_params = [
("xbtype", xbtype),
("edges", edges),
("doc_level", doc_level),
("ter_level", ter_level),
("ext_level", ext_level),
(
"xblocks",
None
if arg_xb_selection is None
else get_hash(tuple(xb_selection.tolist())),
),
("split_on", getattr(split_on, "name", None)),
("split_list", None if split_list is None else get_hash(tuple(split_list))),
]
if sample_hash := corpus.get_sample_hash(doc=True, ter=ter_level, ext=ext_level):
fname_params = [("sample", sample_hash), *fname_params]
target_dir = corpus.blocks_adir if ext_level is None else corpus.chained_adir
fpath_graphml = target_dir / naming.gen("network_map", fname_params, "graphml")
fpath_pdf = target_dir / naming.gen("network_map", fname_params, "pdf")
fpath_svg = target_dir / naming.gen("network_map", fname_params, "svg")
g.save(str(fpath_graphml))
draw(g, str(fpath_pdf))
draw(g, str(fpath_svg))
if False: # Disable improved layout for now
gv = improve_layout(g)
gv.save(
str(
target_dir / naming.gen("network_map_improved", fname_params, "graphml")
)
)
draw(
gv,
str(target_dir / naming.gen("network_map_improved", fname_params, "pdf")),
)
return {"graphml": fpath_graphml, "pdf": fpath_pdf, "svg": fpath_svg}
def get_split_fraction_diff_rel(
corpus, dlevel, dblocks_selection, split_on, split_list
):
"""
Show a halo on domain nodes for the difference in relative volume
between those in and out of a set of values such as years:
(split_on) series of values based on which to split
(split_list) values from split series to consider positive
Example:
get_split_fraction_diff_rel(
...,
split_on=corpus.data[corpus.col_time],
split_list=corpus.data[corpus.col_time].pipe(lambda x: x[x.ge(2002)])
)
"""
index = split_on.index.intersection(
corpus.dblocks.index[corpus.dblocks[dlevel].isin(set(dblocks_selection))]
)
split_on = split_on[index]
split_on.name = "split_on"
dblocks = corpus.dblocks.loc[index, dlevel]
dblocks.name = "dblocks"
grouped_fractions = (
dblocks.groupby([split_on, dblocks])
.count()
.div(split_on.value_counts(), level="split_on")
)
split_fractions = grouped_fractions.groupby(
[grouped_fractions.index.isin(set(split_list), level="split_on"), "dblocks"]
).mean()
diff_fractions = split_fractions[True].sub(split_fractions[False], fill_value=0)
diff_fractions_norm = diff_fractions / diff_fractions.abs().max()
return diff_fractions_norm
def area2radius(area):
"""Domain area, currently squares"""
return np.sqrt(area) # np.sqrt(area / np.pi)
def improve_layout(g):
vp_pos = g.vp["position"]
vp_type = g.vp["type"]
vp_pin = g.new_vertex_property("bool", (vp_type[v] == "D" for v in g.vertices()))
center_x = (lambda x: np.mean([x.max(), x.min()]))(vp_pos.get_2d_array([0])[0])
center_y = (lambda x: np.mean([x.max(), x.min()]))(vp_pos.get_2d_array([1])[0])
for v in g.vertices(): # Put origin at center
vp_pos[v] = [vp_pos[v][0] - center_x, vp_pos[v][1] - center_y]
for v in g.vertices(): # Send degree-1 topics outwards
if vp_type[v] == "T" and v.in_degree() < 2:
vn = next(v.in_neighbours())
if np.linalg.norm(vp_pos[v].a) < np.linalg.norm(vp_pos[vn].a):
vp_pos[v] = vp_pos[vn].a - (vp_pos[v].a - vp_pos[vn].a)
max_norm = max(np.linalg.norm(vp_pos[v].a) for v in g.vertices())
for v in g.vertices(): # Push domains to an outer rim
if vp_type[v] == "D":
norm_ratio = np.linalg.norm(vp_pos[v].a) / max_norm
old_posv = vp_pos[v].a.copy()
vp_pos[v] = vp_pos[v].a * (1 / norm_ratio) * 2
for w in v.out_neighbours():
if w.in_degree() < 2:
# print(vp_pos[v].a - old_posv)
vp_pos[w] = vp_pos[w].a + (vp_pos[v].a - old_posv)
vp_pos[w] = 1.5 * vp_pos[w].a
# Redo sfdp with frozen domains
vp_pos = g.vp["position"] = gt.sfdp_layout(g, pos=vp_pos, pin=vp_pin)
return gt.GraphView(g, vfilt=lambda v: vp_type[v] != "T" or v.in_degree() > 1)
def draw(g, output_file=None):
gt.graph_draw(
g,
pos=g.vp["position"],
vprops=dict(
size=g.vp["size"],
shape=g.vp["shape"],
color=g.vp["color"],
pen_width=g.vp["weight"],
fill_color=g.vp["fill_color"],
text=g.vp["label"],
text_position=-2,
text_color="black",
font_size=15,
),
eprops=dict(
text=g.ep["label"],
font_size=12,
pen_width=g.ep["weight"],
),
output_size=(3 * 1920, 3 * 1080),
adjust_aspect=False,
bg_color="white",
output=output_file,
)
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/network_map.py ====
from pathlib import Path
import re
from bokeh.resources import INLINE as bokeh_INLINE
import colorcet
import lxml.html as html
import lxml.html.builder as E
TABLES_CSS = Path(__file__).parent / "tables.css"
def html_output(body_elements, outfile):
outfile = Path(outfile)
outfile.write_bytes(html.tostring(html_build(body_elements)))
# html_build(report).getroottree().write(outfile, method='html')
return outfile
def html_build(body_elements):
head = E.HEAD(
E.META(charset="utf-8"),
E.BASE(target="_blank"),
E.TITLE("Sashimi block report"),
E.STYLE(TABLES_CSS.read_text()),
)
body = E.BODY(*body_elements)
if body.xpath("//div[@data-root-id]"):
head.extend(html.fragments_fromstring(bokeh_INLINE.render()))
return E.HTML(head, body)
def html_domain_documents_table(data, labels, code_terms_map=None):
format_marks = make_format_marks(code_terms_map)
tr = E.TR()
if "venues" in data:
tr.append(E.TH("Venue"))
tr.append(E.TH("Title"))
tr.append(E.TH("Abstract"))
if "code_terms" in data:
tr.append(E.TH("Code terms"))
table = E.TABLE(
E.THEAD(tr),
tbody := E.TBODY(),
)
for idx in data["examples"].index:
tr = E.TR()
if "venues" in data:
tr.append(E.TD(data["venues"].loc[idx]))
tr.append(
E.TD(html.fragment_fromstring(data["titles"].loc[idx], create_parent="p"))
)
tr.append(
E.TD(
*[
y
for x in data["abstracts"].loc[idx]
for y in (*format_marks(x), E.HR())
][:-1],
CLASS="abstracts",
)
)
if "code_terms" in data:
tr.append(
E.TD(format_code_terms(data["code_terms"].loc[idx]), CLASS="code_terms")
)
tbody.append(tr)
return E.DIV(table)
def html_xblock_yblocks_table(data, xbtype, labels):
yb_key = "common" if "common" in data else "specific"
table = E.TABLE(
E.THEAD(
E.TR(
E.TH(
data["id"],
E.BR(),
E.BR(),
E.SMALL(f"({pow(2, data[yb_key]['ent'][1]):.2})", CLASS="tstats"),
)
)
)
)
tbody = E.TBODY()
table.append(tbody)
tbody.append(
E.TR(
E.TD(
E.DIV(
*format_xblock_yblocks_elements(data[yb_key], labels),
CLASS="single_yblocks",
)
)
)
)
if "plot" in data:
tbody.append(
E.TR(
E.TD(
E.DIV(
*map(html.fragment_fromstring, data["plot"]),
CLASS="single_yblocks",
)
)
)
)
return E.DIV(table)
def html_multi_xblocks_yblocks_table(data, xbtype, labels, plots=False):
levels = sorted(set(x[0] for x in data))
deep = levels != [1]
ln_label = f"L{levels[-1]}{xbtype[0].upper()}" if len(levels) == 1 else "ID"
l1_label = f"L1{xbtype[0].upper()}"
columns = (
["Plot", ln_label, "Common", "Plot", l1_label, "Specific", "Elements"]
if deep and plots
else [ln_label, "Common", l1_label, "Specific", "Elements"]
if deep and not plots
else ["Plot", l1_label, "Specific", "Elements"]
if not deep and plots
else [l1_label, "Specific", "Elements"]
)
table = E.TABLE(E.THEAD(E.TR(*(E.TH(str(key)) for key in columns))))
tbody = E.TBODY()
table.append(tbody)
for _, d_data in data.items():
drow = E.TR()
if deep:
if plots:
drow.append(E.TD(*map(html.fragment_fromstring, d_data["plot"])))
drow.extend(
(
E.TD(
d_data["id"],
E.BR(),
E.BR(),
E.SMALL(
f"({pow(2, d_data['common']['ent'][1]):.2})", CLASS="tstats"
),
rowspan=str(len(d_data[l1_label])),
CLASS="label",
),
E.TD(
*format_xblock_yblocks_elements(d_data["common"], labels),
CLASS="yblocks",
rowspan=str(len(d_data[l1_label])),
),
)
)
for _, sd_data in d_data[l1_label].items():
tbody.append(drow)
if plots:
drow.append(E.TD(*map(html.fragment_fromstring, sd_data["plot"])))
drow.extend(
(
E.TD(
sd_data["id"],
E.BR(),
E.BR(),
E.SMALL(
f"({pow(2, sd_data['specific']['ent'][1]):.2})",
CLASS="tstats",
),
CLASS="label",
),
E.TD(
*format_xblock_yblocks_elements(sd_data["specific"], labels),
CLASS="yblocks",
),
E.TD(format_elements(sd_data["elements"]), CLASS="elements"),
)
)
drow = E.TR()
return E.DIV(table)
def format_xblock_yblocks_elements(xblock_yblocks_elements, labels):
formatted = []
ylevel = xblock_yblocks_elements["levels"][1]
for yb, yb_info in xblock_yblocks_elements["blocks"].items():
formatted.extend(
(
labels(ylevel, yb),
E.SMALL(
f' ({yb_info["ent"] / xblock_yblocks_elements["ent"][1]:.0%})',
CLASS="tstats",
),
": ",
)
)
for el, el_ent in yb_info["elements"]:
formatted.extend(
(
f"{el}",
E.SMALL(f' ({el_ent / yb_info["ent_el"]:.0%})', CLASS="tstats"),
)
)
formatted.append(", ")
formatted.pop()
formatted.append(E.BR())
formatted.append(E.BR())
if formatted:
formatted.pop()
return formatted
def format_elements(elements):
return E.UL(*(html.fragment_fromstring(x, create_parent="li") for x in elements))
def format_code_terms(code_terms):
return E.UL(
*(
html.fragment_fromstring(
f'<span style="font-size:0;opacity:0">code:</span>{x}',
create_parent="li",
)
for x in code_terms
)
)
def make_format_marks(code_terms_map):
if code_terms_map is None:
return lambda x: [x]  # callers unpack the result, so wrap the raw text in a list
def split_rx(rex):
return re.compile(r"(?i)\b(" + rex + r")\b")
code_terms_rx = {k: split_rx(v) for k, v in code_terms_map.items()}
code_terms_color = {
k: colorcet.glasbey_dark[i] for i, k in enumerate(code_terms_map)
}
all_code_terms_rx = split_rx(r"|".join(code_terms_map.values()))
def get_color(text):
for term, term_rx in code_terms_rx.items():
if term_rx.match(text):
return code_terms_color[term]
raise ValueError("Text did not match a term.")
def format_marks(text):
parts = all_code_terms_rx.split(text)
return [
E.SPAN(x, STYLE=f"color: white; background-color: {get_color(x)}")
if i % 2
else x
for i, x in enumerate(parts)
]
return format_marks
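# Usage sketch (hypothetical map): with code_terms_map = {"ml": r"machine learning"},
# format_marks("on machine learning here") returns
# ["on ", <SPAN with colored background>, " here"], ready to splat into E.TD.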
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/blocks/tables/tables_html.py ====
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from lxml import etree
import pkg_resources
class WosLam:
"""
Access WebOfScience Links Article Match API.
Appropriate WOS credentials must be provided during instantiation.
See the API documentation for details on fields and queries:
<http://ipscience-help.thomsonreuters.com/LAMRService/WebServiceOperationsGroup/requestAPIWoS/descriptionXMLrequest.html>
"""
def __init__(self, user, password):
self.apiurl = "https://ws.isiknowledge.com/cps/xrpc"
self.whatfields = (
"timesCited",
"ut",
"doi",
"pmid",
"sourceURL",
"citingArticlesURL",
"relatedRecordsURL",
)
self.lookupfields = {"doi", "ut", "pmid", "isbn"}
tpl_path = pkg_resources.resource_filename("sashimi", "wos_api_request.tpl")
with open(tpl_path) as tplfile:
self.reqtpl = tplfile.read().format(user=user, password=password)
def get(self, fields, queries):
"""
Gets information from WOS as dictionaries.
fields - list of strings
queries - list of dictionaries of type 'id'->'value'
Returns:
iterator over dictionaries containing the results for each query
"""
for i in range(0, len(queries), 50):
part = queries[i : i + 50]
for r in self.get_50(fields, part):
yield r
print("Got {} of {}".format(i + 50, len(queries)))
def get_50(self, fields, queries):
if len(queries) > 50:
raise Exception("Queries are limited to 50 elements")
reply = self.get_raw(fields, queries)
root = etree.fromstring(reply)
xns = {"x": "http://www.isinet.com/xrpc42"}
if root.xpath("//x:error", namespaces=xns):
raise Exception("Request returned errors, likely bad credentials")
qxpath = '//x:map[@name="cite_{}"]'
fxpath = '*/*[@name="{}"]/text()'
for count, q in enumerate(queries):
elm = root.xpath(qxpath.format(count), namespaces=xns)[0]
r = dict()
for name in fields:
try:
r[name] = elm.xpath(fxpath.format(name))[0]
except IndexError:
print("Not found {} for {}".format(name, q))
r[name] = None
yield r
def get_raw(self, fields, queries):
what = self.get_what(fields)
lookup = self.get_lookup(queries)
request = self.get_request(what, lookup)
with urlopen(request) as f:
return f.read()
def get_request(self, what, lookup):
return Request(
self.apiurl, self.reqtpl.format(what=what, lookup=lookup).encode()
)
def get_what(self, fields):
what = []
for f in fields:
if f in self.whatfields:
what.append("<val>{}</val>".format(f))
else:
raise Exception("Invalid field: {}".format(f))
return "\n".join(what)
def get_lookup(self, queries):
lookup = []
for count, q in enumerate(queries):
if q and self.lookupfields.issuperset(q):
lookup.append('<map name="cite_{}">'.format(count))
for k in q:
lookup.append('<val name="{}">{}</val>'.format(k, q[k]))
lookup.append("</map>")
else:
raise Exception("Invalid field in: {}".format(list(q.keys())))
return "\n".join(lookup)
# Convenience functions
def convert(self, c_from, c_to, items):
for r in self.get([c_to], [{c_from: v} for v in items]):
yield r[c_to]
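# Usage sketch (placeholder credentials):
# lam = WosLam("user", "secret")
# list(lam.doi_to_timescited(["10.1000/xyz123"]))  # -> ["42"], or [None] if absent
# Values come back as strings, since they are read from XML text nodes.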
def pmid_to_timescited(self, pmids):
return self.convert("pmid", "timesCited", pmids)
def doi_to_timescited(self, dois):
return self.convert("doi", "timesCited", dois)
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/util/wos_lam/__init__.py ====
import graph_tool.all as gt
from itertools import count
import numpy as np
from tqdm import tqdm
from ..naming import naming
from .util import contiguous_map_nested_blockstate
def create_docter_graph(corpus, by=None):
"""
Creates one or more document term graphs, names it, stores it and
sets as loaded.
Parameters
----------
by: name of a column from `corpus.data` or an equivalent `pandas.Series`
If provided, group documents by this column and create multiple graphs
Returns
-------
names: a `list` containing the names of the graphs created
"""
name_args = [("kind", "docter"), ("column", corpus.column)]
if sample := corpus.get_sample_hash(doc=True, ter=True):
name_args.append(("sample", sample))
if by is not None:
name_by = (
by
if isinstance(by, str)
else by.name
if hasattr(by, "name")
else by.__name__
)
if not name_by:
raise ValueError
def get_graph_name(name_args, group_name):
# build a fresh list instead of `+=`, which would mutate the caller's
# name_args and accumulate ("by", ...) entries across groups
by_args = [] if by is None else [("by", (name_by, group_name))]
return naming.gen("graph", name_args + by_args, corpus.ext_g)
graph_names = []
for group_name, group in (
[("all", corpus.data)] if by is None else corpus.data.groupby(by)
):
print(f"Generating doc ter graph for {group_name}")
graph_name = get_graph_name(name_args, group_name)
fpath = corpus.data_dir / graph_name / graph_name
load = False
try:
fpath.parent.mkdir(exist_ok=False)
except FileExistsError:
print("Found existing graph. Will skip creation, test loading.")
load = True
if load:
g = gt.load_graph(str(fpath))
else:
doc_tokens = corpus.data[corpus.column].loc[
corpus.samplify(group.index, corpus.data[corpus.column])
]
try:
g = gen_docter_graph(doc_tokens, corpus.get_document_ids())
g.save(str(fpath))
except Exception:
fpath.parent.rmdir() # dir is ours and we failed, so remove it
raise
graph_names.append(graph_name)
if len(graph_names) == 1:
corpus.set_graph(graph_names[0])
print("Graph set to: {}".format(graph_names[0]))
return graph_names
def gen_docter_graph(doc_tokens, doc_ids):
"""
Builds a bipartite undirected graph of documents connecting to the terms
they contain.
Parameters
----------
doc_tokens: `pandas.Series` of tokenized documents (sequences of sentences of terms)
doc_ids: mapping from the `doc_tokens` index to document identifiers
"""
vocab = {w for d in doc_tokens for s in d for w in s}
print("Vocab size: {}".format(len(vocab)))
vocabindex = dict((w, n) for n, w in enumerate(vocab))
g = gt.Graph(directed=False)
g.vp["type"] = g.new_vertex_property("int") # type = 0: document, 1: term
g.vp["name"] = g.new_vertex_property("string")
g.add_vertex(len(vocab) + len(doc_tokens))
for w, vi in vocabindex.items():
g.vp["type"][vi] = 1
g.vp["name"][vi] = w
def gen_docter_edges():
document_ids = doc_ids
for vi, di in enumerate(
tqdm(doc_tokens.index, desc="Processing docs"), len(vocab)
):
g.vp["type"][vi] = 0
g.vp["name"][vi] = document_ids[di]
for s in doc_tokens[di]:
for w in s:
if w in vocab:
yield (vi, vocabindex[w])
g.add_edge_list(gen_docter_edges())
return g
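# Comment sketch of the resulting graph: vertices [0, len(vocab)) are terms
# (type 1), vertices [len(vocab), len(vocab) + len(doc_tokens)) are documents
# (type 0), and one edge is added per token occurrence, so parallel edges
# encode a term's frequency within a document.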
def calc_nested_blockstate(corpus, name_args=[], state_args={}):
"""
Calculate and save a nested blockstate for the graph, using
`graph_tool.inference.minimize.minimize_nested_blockmodel_dl()`.
Parameters
----------
corpus: provides the graph to minimize (`corpus.graph`) and where to store results.
name_args: `list` of 2-tuples
Extra arguments to add to the blockstate filename.
state_args: `dict`
Passed downstream. For key "ec", value is passed as `g.ep[value]`.
"""
state_args = state_args.copy()
if sample := corpus.get_sample_hash(doc=True, ter=True):
name_args += [("sample", sample)]
for irun in count():
fname = naming.gen(
"blockstate",
name_args + [("step", "mnbdl")] + [("run", irun)],
corpus.ext_nbs,
)
try:
(corpus.graph_dir / fname).mkdir(exist_ok=False)
print(f"Reserving name: {fname}")
break
except OSError:
pass
g = corpus.graph
store_blockstate_args = {"fpath": corpus.graph_dir / fname / fname}
if "ec" in state_args:
state_args["base_type"] = gt.LayeredBlockState
store_blockstate_args["ec"] = state_args["ec"]
state_args["ec"] = g.ep[store_blockstate_args["ec"]]
if "layers" in state_args:
store_blockstate_args["layers"] = state_args["layers"]
if "type" in g.vp:
assert "pclabel" not in state_args
store_blockstate_args["pclabel"] = "type"
state_args["pclabel"] = g.vp[store_blockstate_args["pclabel"]]
print('Vertex property "type" found, using it as pclabel')
print("Starting minimization...")
state = gt.minimize_nested_blockmodel_dl(g, state_args=state_args)
state = contiguous_map_nested_blockstate(state)
corpus.store_blockstate(state=state, **store_blockstate_args)
print("Saved state: {}".format(fname))
corpus.loaded["blockstate"] = str(fname)
return fname
def refine_state(state, strategy="sweep"):
print("Refining state...")
if isinstance(strategy, str):
strategy = [strategy]
for method in strategy:
if method == "sweep":
for i in range(1000):
state.multiflip_mcmc_sweep(beta=np.inf, niter=10)
if method == "anneal":
gt.mcmc_anneal(
state,
beta_range=(1, 10),
niter=1000,
mcmc_equilibrate_args=dict(force_niter=10),
)
# ==== /sashimi_domains-0.9.3-py3-none-any.whl/sashimi/graph_models/domain_topic_model.py ====
u"""
Created at 2022.05.31
This script contains all the command line parameters
"""
import gzip
import os
import sys
from multiprocessing import cpu_count
from typing import Optional, Dict, Set, Tuple
import click
import matplotlib as mpl
from click_option_group import optgroup
from loguru import logger
from sashimi.base.GenomicLoci import GenomicLoci
from sashimi.conf.config import CLUSTERING_METHOD, COLORS, COLORMAP, DISTANCE_METRIC, IMAGE_TYPE
from sashimi.file.ATAC import ATAC
from sashimi.plot import Plot
__version__ = "0.1.5"
__author__ = "ygidtu & Ran Zhou"
__email__ = "[email protected]"
def decode_region(region: str):
regions = region.split(":")
if len(regions) < 3:
strand = "+"
else:
strand = regions[-1]
sites = [int(x) for x in regions[1].split("-")]
return GenomicLoci(regions[0], sites[0], sites[1], strand)
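# Comment examples:
# decode_region("chr1:100-200:+")  # -> GenomicLoci("chr1", 100, 200, "+")
# decode_region("chr1:100-200")    # strand defaults to "+"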
class FileList(object):
def __init__(self,
path: str,
category: str,
color: str = "black",
label: Optional[str] = None,
group: Optional[str] = None,
exon_focus: Optional[str] = None,
library: str = "fru",
trans: Optional[str] = None,
depth: int = 30000):
self.path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(self.path):
raise FileNotFoundError(f"{self.path} not found.")
self.label = label if label else os.path.basename(path)
self.group = group
self.color = color
self.category = category
self.exon_focus = exon_focus
self.library = library
self.trans = trans
self.depth = depth
@property
def name(self) -> str:
return os.path.basename(self.path)
def __str__(self):
return f"path: {self.path} \nlabel: {self.label} \ngroup: {self.group} \n" \
f"color: {self.color} \ncategory: {self.category} \nlibrary: {self.library}"
def load_barcodes(barcode: str) -> Tuple[Dict[str, Dict[str, Set[str]]], Dict[str, str]]:
u"""
as name says
:param barcode: the path to barcode file
"""
r = gzip.open(barcode, "rt") if barcode.endswith(".gz") else open(barcode, "r")
res = {}
colors = {}
for line in r:
line = line.strip().split()
if len(line) >= 4:
key, bc, group, color = line[:4]
colors[group] = color
elif len(line) == 3:
key, bc, group = line[:3]
elif len(line) == 2:
key, bc = line
group = ""
else:
continue
if key not in res:
res[key] = {}
if group not in res[key]:
res[key][group] = set()
res[key][group].add(bc)
r.close()
return res, colors
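# Comment sketch of an accepted barcode file (whitespace separated):
# sample1 AAACCTGAGAAACCAT Tcell #1f77b4
# sample1 AAACCTGAGAAACCGC Bcell
# yields res == {"sample1": {"Tcell": {...}, "Bcell": {...}}} and
# colors == {"Tcell": "#1f77b4"}; rows without a 4th column add no color entry.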
def __read_iter__(path):
with open(path) as r:
for idx, line in enumerate(r):
if line.startswith("#"):
continue
line = line.split()
if not line:
continue
yield idx, line
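# Comment sketch: __read_iter__ skips "#" comments and blank lines, yielding
# (0-based line number, whitespace-split fields), e.g. (0, ["a.bam", "bam"]).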
def process_file_list(infile: str, category: str = "density"):
u"""
Process and check the file list format.
:param infile: path to input file list
:param category: the image type of file list used for
"""
try:
if category in ["density"]:
for idx, line in __read_iter__(infile):
path, category = line[0], line[1]
if category not in ["bam", "bigwig", "bw", "depth", "igv", "atac"]:
raise ValueError(f"{category} is not supported in density plot.")
if len(line) < 3:
yield FileList(path=path, category=category, color=COLORS[idx % len(COLORS)])
elif len(line) < 4:
yield FileList(path=path, category=category, color=COLORS[idx % len(COLORS)], label=line[2])
elif len(line) < 5:
yield FileList(path=path, category=category, color=line[3], label=line[2])
elif len(line) < 6:
yield FileList(path=path, category=category, color=line[3], label=line[2], library=line[4])
else:
yield FileList(path=path, category=category, color=line[3], label=line[2], library=line[4],
depth=int(line[5]))  # depth is declared as int
elif category in ["heatmap"]:
groups = {}
for idx, line in __read_iter__(infile):
path, category = line[0], line[1]
if category not in ["bam", "bigwig", "bw", "depth", "atac"]:
raise ValueError(f"{category} is not supported in heatmap plot.")
if len(line) < 3:
yield FileList(path=path, category=category, color=COLORMAP[0])
elif len(line) < 4:
groups[line[2]] = 0
yield FileList(path=path, category=category,
color=COLORMAP[len(groups) % len(COLORMAP)], group=line[2])
else:
groups[line[2]] = 0
yield FileList(path=path, category=category,
color=line[3], group=line[2], library=line[4] if len(line) > 4 else "fru")
elif category in ["line"]:
groups = {}
for idx, line in __read_iter__(infile):
path, category = line[0], line[1]
if category not in ["bam", "bigwig", "bw", "depth"]:
raise ValueError(f"{category} is not supported in density plot.")
if len(line) < 3:
yield FileList(path=path, category=category, color=COLORS[idx % len(COLORS)])
elif len(line) < 4:
if line[2] not in groups:
groups[line[2]] = 0
groups[line[2]] += 1
yield FileList(path=path, category=category,
color=COLORS[groups[line[2]] % len(COLORS)], group=line[2])
elif len(line) < 5:
if line[2] not in groups:
groups[line[2]] = 0
groups[line[2]] += 1
yield FileList(path=path, category=category, label=line[3],
color=COLORS[groups[line[2]] % len(COLORS)], group=line[2])
elif len(line) < 6:
if line[2] not in groups:
groups[line[2]] = 0
groups[line[2]] += 1
yield FileList(path=path, category=category, label=line[3],
color=line[4], group=line[2])
elif category in ["interval"]:
for idx, line in __read_iter__(infile):
if len(line) < 2:
yield FileList(path=line[0], category="interval")
else:
yield FileList(path=line[0], category="interval", label=line[1])
elif category in ["igv"]:
for idx, line in __read_iter__(infile):
path, category = line[0], line[1]
if category not in ["bam", "bigwig", "bw", "depth", "igv"]:
raise ValueError(f"{category} is not supported in density plot.")
if len(line) < 3:
yield FileList(path=path, category=category, color=COLORS[idx % len(COLORS)])
elif len(line) < 4:
yield FileList(path=path, category=category, color=COLORS[idx % len(COLORS)], label=line[2])
elif len(line) < 5:
yield FileList(path=path, category=category, color=line[3], label=line[2])
else:
yield FileList(path=path, category=category, color=line[3], label=line[2], exon_focus=line[4])
elif category in ["hic"]:
for idx, line in __read_iter__(infile):
path, category = line[0], line[1]
if len(line) < 3:
yield FileList(path=path, category=category)
elif len(line) < 4:
yield FileList(path=path, category=category, label=line[2])
elif len(line) < 5:
yield FileList(path=path, category=category, label=line[2], color=line[3])
else:
yield FileList(path=path, category=category, label=line[2], color=line[3], trans=line[4])
except FileNotFoundError as err:
logger.error(f"{infile} -> {err}")
sys.exit(1)
return None
@click.command(context_settings=dict(help_option_names=['-h', '--help']), )
@click.version_option(__version__, message="Current version %(version)s")
@click.option("--debug", is_flag=True, help="enable debug level log")
@click.option("-e", "--event", type=click.STRING, required=True,
help="Event range eg: chr1:100-200:+")
@optgroup.group("Common input files configuration")
@optgroup.option("--color-factor", default=1, type=click.IntRange(min=1),
help="Index of column with color levels (1-based); "
"NOTE: LUAD|red -> LUAD while be labeled in plots and red while be the fill color",
show_default=True)
@optgroup.option("--barcode", type=click.Path(exists=True), show_default=True,
help="Path to barcode list file, At list two columns were required, "
"- 1st The name of bam file, not the alias of bam; \b"
"- 2nd the barcode; \b"
"- 3rd The group label, optional; \b"
"- 4th The color of each cell type, default using the color of corresponding bam file.\n")
@optgroup.option("--barcode-tag", type=click.STRING, default="CB", show_default=True,
help="The default cell barcode tag label")
@optgroup.option("--umi-tag", type=click.STRING, default="UB", show_default=True,
help="The default UMI barcode tag label")
@optgroup.option("-p", "--process", type=click.IntRange(min=1, max=cpu_count()), default=1,
help="How many cpu to use")
@optgroup.option("--group-by-cell", type=click.BOOL, is_flag=True, help="Group by cell types in density/line plot")
@optgroup.option("--remove-duplicate-umi", type=click.BOOL, is_flag=True, help="Drop duplicated UMIs by barcode")
@optgroup.group("Output settings")
@optgroup.option("-o", "--output", type=click.Path(),
help="Path to output graph file", show_default=True)
@optgroup.option("-d", "--dpi", default=300, type=click.IntRange(min=1, clamp=True),
help="The resolution of output file", show_default=True)
@optgroup.option("--raster", is_flag=True, show_default=True,
help="The would convert heatmap and site plot to raster image "
"(speed up rendering and produce smaller files), only affects pdf, svg and PS")
@optgroup.option("--height", default=1, type=float,
help="The height of output file, default adjust image height by content", show_default=True)
@optgroup.option("--width", default=10, type=click.IntRange(min=0, clamp=True),
help="The width of output file, default adjust image width by content", show_default=True)
@optgroup.option("--backend", type=click.STRING, default="Agg", help="Recommended backend", show_default=True)
@optgroup.group("Reference settings")
@optgroup.option("-r", "--reference", type=click.Path(exists=True),
help="Path to gtf file, both transcript and exon tags are necessary")
@optgroup.option("--interval", type=click.Path(exists=True),
help="Path to list of interval files in bed format, "
"1st column is path to file, 2nd column is the label [optional]")
@optgroup.option("--show-id", is_flag=True, show_default=True, help="Whether show gene id or gene name")
@optgroup.option("--show-exon-id", is_flag=True, show_default=True, help="Whether show gene id or gene name")
@optgroup.option("--no-gene", is_flag=True, type=click.BOOL, show_default=True,
help="Do not show gene id next to transcript id")
@optgroup.option("--domain", default=False, is_flag=True, type=click.BOOL, show_default=True,
help="Add domain information into reference track")
@optgroup.option("--proxy", default=None, type=click.STRING,
help="The http or https proxy for EBI/Uniprot requests,"
"if `--domain` is True, eg: http://127.0.0.1:1080")
@optgroup.option("--timeout", default=10, type=click.IntRange(min=1, clamp=True),
show_default=True,
help="The requests timeout when `--domain` is True.")
@optgroup.option("--local-domain", default="", is_flag=False, type=click.STRING, show_default=True,
help="Load local domain folder and load into reference track, download from "
"https://hgdownload.soe.ucsc.edu/gbdb/hg38/uniprot/")
@optgroup.option("--remove-empty", is_flag=True, type=click.BOOL, show_default=True,
help="Whether to plot empty transcript")
@optgroup.option("--transcripts-to-show", default="", show_default=True,
help="Which transcript to show, transcript name or id in gtf file, eg: transcript1,transcript2")
@optgroup.option("--choose-primary", is_flag=True, type=click.BOOL, show_default=True,
help="Whether choose primary transcript to plot.")
@optgroup.option("--ref-color", default="black", type=click.STRING,
show_default=True, help="The color of exons")
@optgroup.option("--intron-scale", type=click.FLOAT, default=0.5, help="The scale of intron", show_default=True)
@optgroup.option("--exon-scale", type=click.FLOAT, default=1, help="The scale of exon", show_default=True)
@optgroup.group("Density plot settings")
@optgroup.option("--density", type=click.Path(exists=True),
help="""
The path to list of input files, a tab separated text file, \b
- 1st column is path to input file, \b
- 2nd column is the file category, \b
- 3rd column is input file alias (optional), \b
- 4th column is color of input files (optional),
- 5th column is the library of input file (optional, only required by bam file). \n
""")
@optgroup.option("--customized-junction", type=click.STRING, default=None, show_default=True,
help="Path to junction table column name needs to be bam name or bam alias.")
@optgroup.option("--only-customized-junction", is_flag=True, show_default=True, help="Only used customized junctions.")
@optgroup.option("-t", "--threshold", default=0, type=click.IntRange(min=0, clamp=True),
show_default=True, help="Threshold to filter low abundance junctions")
@optgroup.option("--density-by-strand", is_flag=True, type=click.BOOL,
show_default=True, help="Whether to draw density plot by strand")
@optgroup.option("--show-site", is_flag=True, type=click.BOOL,
show_default=True, help="Whether to draw additional site plot")
@optgroup.option("--site-strand", type=click.Choice(["all", "+", "-"]), default="all", show_default=True,
help="Which strand kept for site plot, default use all")
@optgroup.option("--included-junctions", type=click.STRING, default=None,
help="The junction id for including, chr1:1-100", show_default=True)
@optgroup.option("--show-junction-num", type=click.BOOL, is_flag=True, show_default=True,
help="Whether to show the number of junctions")
@optgroup.option("--sc-density-height-ratio", type=float, default=1, show_default=True,
help="The relative height of single cell density plots")
@optgroup.group("Line plot settings")
@optgroup.option("--line", type=click.Path(exists=True),
help="""
The path to list of input files, a tab separated text file, \b
- 1st column is path to input file, \b
- 2nd column is the file category, \b
- 3rd column is input file group (optional), \b
- 4th column is input file alias (optional),
- 5th column is color palette of the corresponding group (optional).
""")
@optgroup.option("--hide-legend", default=False, is_flag=True, type=click.BOOL, help="Whether to hide legend")
@optgroup.option("--legend-position", default="upper right", type=click.STRING, help="The legend position")
@optgroup.option("--legend-ncol", default=0, type=click.IntRange(min=0, clamp=True),
help="The number of columns of legend")
@optgroup.group("Heatmap plot settings")
@optgroup.option("--heatmap", type=click.Path(exists=True),
help="""
The path to list of input files, a tab separated text file, \b
- 1st column is path to input file, \b
- 2nd column is the file category, \b
- 3rd column is input file group (optional), \b
- 4th column is color palette of the corresponding group.
""")
@optgroup.option("--clustering", is_flag=True, show_default=True, help="Enable clustering of the heatmap")
@optgroup.option("--clustering-method", type=click.Choice(CLUSTERING_METHOD), default="ward",
show_default=True, help="The clustering method for heatmap")
@optgroup.option("--distance-metric", type=click.Choice(DISTANCE_METRIC), default="euclidean",
show_default=True, help="The distance metric for heatmap")
@optgroup.option("--heatmap-scale", is_flag=True, show_default=True, help="Do scale on heatmap matrix.")
@optgroup.option("--heatmap-vmin", type=click.INT, show_default=True,
help="Minimum value to anchor the colormap, otherwise they are inferred from the data.")
@optgroup.option("--heatmap-vmax", type=click.INT, show_default=True,
help="Maximum value to anchor the colormap, otherwise they are inferred from the data.")
@optgroup.option("--show-row-names", is_flag=True, show_default=True, help="Show row names of heatmap")
@optgroup.option("--sc-heatmap-height-ratio", type=float, default=.2, show_default=True,
help="The relative height of single cell heatmap plots")
@optgroup.group("IGV settings")
@optgroup.option("--igv", type=click.Path(exists=True),
help="""
The path to list of input files, a tab separated text file, \b
- 1st column is path to input file, \b
- 2nd column is the file category, \b
- 3rd column is input file alias (optional), \b
- 4th column is color of input files (optional),\b
- 5th column is exon_id for sorting the reads (optional).
""")
@optgroup.option("--m6a", default=None, type=click.STRING,
help="""
Sashimi.py will load location information from the given tag and \b
then highlight the RNA m6a modification sites on individual reads. \b
If there are multiple m6a modification sites, please format the tag as follows: \b
234423,234450
""")
@optgroup.option("--polya", default=None, type=click.STRING,
help="""
Sashimi.py will load the length of poly(A) from the given tag and \b
then visualize the poly(A) part at the end of each individual read.
""")
@optgroup.option("--rs", default=None, type=click.STRING,
help="""
Sashimi.py will load the real strand information of each read from the given tag; \b
the strand information is necessary for visualizing the poly(A) part.
""")
@optgroup.option("--del-ratio-ignore", default=1.0,
type=click.FloatRange(min=0.0, max=1.0, clamp=True),
help="""
Ignore deletion gaps in nanopore or pacbio reads. \b
If a deletion region is smaller than (alignment length) * (del_ratio_ignore), \b
the deletion gap will be filled. \b
Currently the default del_ratio_ignore is 1.0.
""")
@optgroup.group("HiC settings")
@optgroup.option("--hic", type=click.Path(exists=True),
help="""
The path to list of input files, a tab separated text file, \b
- 1st column is path to input file, \b
- 2nd column is the file category, \b
- 3rd column is input file alias (optional), \b
- 4th column is color of input files (optional),\b
- 5th column is data transform for HiC matrix, eg log1p, log2, log10 (optional).
""")
@optgroup.group("Additional annotation")
@optgroup.option("-f", "--genome", type=click.Path(), default=None,
show_default=True, help="Path to genome fasta")
@optgroup.option("--sites", default=None, type=click.STRING,
help="Where to plot additional indicator lines, comma separated int")
@optgroup.option("--stroke", type=click.STRING, show_default=True,
help="The stroke regions: start1-end1:start2-end2@color-label, "
"draw a stroke line at bottom, default color is red")
@optgroup.option("--link", type=click.STRING, show_default=True,
help="The link: start1-end1:start2-end2@color, "
"draw a link between two site at bottom, default color is blue")
@optgroup.option("--focus", type=click.STRING, show_default=True, help="The highlight regions: 100-200:300-400")
@optgroup.group("Motif settings")
@optgroup.option("--motif", type=click.Path(exists=True),
help="The path to customized bedGraph file, first three columns is chrom, start and end site, "
"the following 4 columns is the weight of ATCG.")
@optgroup.option("--motif-region", type=click.STRING, default="",
help="The region of motif to plot in start-end format", show_default=True)
@optgroup.option("--motif-width", type=click.FLOAT, default=0.8,
help="The width of ATCG characters", show_default=True)
@optgroup.group("Layout settings")
@optgroup.option("--n-y-ticks", default=4, type=click.IntRange(min=0, clamp=True),
help="The number of ticks of y-axis")
@optgroup.option("--distance-ratio", type=click.FLOAT, default=0.1,
help="distance between transcript label and transcript line", show_default=True)
@optgroup.option("--reference-scale", type=click.FLOAT, default=.25,
help="The size of reference plot in final plot", show_default=True)
@optgroup.option("--stroke-scale", type=click.FLOAT, default=.25,
help="The size of stroke plot in final image", show_default=True)
@optgroup.group("Overall settings")
@optgroup.option("--font-size", default=8, type=click.IntRange(min=1, clamp=True),
help="The font size of x, y-axis and so on")
@optgroup.option("--reverse-minus", default=False, is_flag=True, type=click.BOOL,
help="Whether to reverse strand of bam/reference file")
@optgroup.option("--hide-y-label", default=False, is_flag=True, type=click.BOOL,
help="Whether hide y-axis label")
@optgroup.option("--same-y", default=False, is_flag=True, type=click.BOOL,
help="Whether different sashimi/line plots shared same y-axis boundaries")
@optgroup.option('--log', type=click.Choice(["0", "2", "10", "zscore"]), default="0",
help="y axis log transformed, 0 -> not log transform; 2 -> log2; 10 -> log10")
@optgroup.option("--title", type=click.STRING, default=None, help="Title", show_default=True)
@optgroup.option("--font", type=click.STRING, default=None, help="Fonts", show_default=True)
def main(**kwargs):
u"""
Welcome to use sashimi
\f
"""
if not kwargs["debug"]:
logger.remove()
logger.add(sys.stderr, level="INFO")
logger.level("INFO")
# print warning info about backend
if (kwargs["domain"] or kwargs["local_domain"]) and kwargs["backend"].lower() != "cairo":
logger.warning(f"{kwargs['backend']} backend may have problems with small domain, "
f"if there is any please try cairo backend instead.")
if kwargs["raster"] and kwargs["heatmap"] and kwargs["backend"].lower() == "cairo":
logger.warning(f"{kwargs['backend']} backend may have problems with rasterized heatmap, "
f"if there is any, please try another backend instead.")
try:
mpl.use(kwargs["backend"])
except ImportError as err:
if kwargs["backend"].lower() == "cairo":
logger.warning("Cairo backend required cairocffi installed")
logger.warning("Switch back to Agg backend")
else:
logger.warning(f"backend error, switch back to Agg: {err}")
mpl.use("Agg")
mpl.rcParams['pdf.fonttype'] = 42
if kwargs["font"]:
mpl.rcParams['font.family'] = kwargs["font"]
for k, v in kwargs.items():
logger.debug(f"{k} => {v}")
if kwargs["included_junctions"] is not None:
included_junctions = set([sj.strip() for sj in kwargs["included_junctions"].split(',')])
else:
        included_junctions = set()
p = Plot()
region = decode_region(kwargs["event"])
p.set_region(region=region)
p.add_customized_junctions(kwargs["customized_junction"])
barcodes, sc_colors = {}, {}
if kwargs.get("barcode") and os.path.exists(kwargs.get("barcode")):
barcodes, sc_colors = load_barcodes(kwargs.get("barcode"))
size_factors = {}
# add reference
for key in kwargs.keys():
if key in IMAGE_TYPE and kwargs[key] and os.path.exists(kwargs[key]):
logger.debug(f"add {key} {kwargs[key]}")
if key == "reference":
p.set_reference(kwargs["reference"],
show_gene=not kwargs["no_gene"],
color=kwargs["ref_color"],
remove_empty_transcripts=kwargs["remove_empty"],
choose_primary=kwargs["choose_primary"],
font_size=kwargs["font_size"],
show_id=kwargs["show_id"],
reverse_minus=kwargs["reverse_minus"],
show_exon_id=kwargs["show_exon_id"],
transcripts=kwargs["transcripts_to_show"],
add_domain=kwargs["domain"],
local_domain=kwargs["local_domain"]
)
elif key == "interval":
for f in process_file_list(kwargs[key], key):
p.add_interval(f.path, f.label)
elif key == "density":
for f in process_file_list(kwargs[key], key):
if barcodes and f.name in barcodes.keys() and f.category in ["bam", "atac"]:
for group in barcodes[f.name].keys():
if kwargs["group_by_cell"] and group:
label = group
elif group:
label = f"{f.label} - {group}"
else:
label = f.label
if f.label not in size_factors.keys() and f.category == "atac":
logger.info(f"Indexing {f.path}")
size_factors[f.label] = ATAC.index(f.path, barcodes[f.name])
p.add_density(f.path,
category=f.category,
label=label,
barcode=group,
barcode_groups=barcodes[f.name],
barcode_tag=kwargs["barcode_tag"],
umi_tag=kwargs["umi_tag"],
library=f.library,
size_factor=size_factors.get(f.label),
color=sc_colors.get(group, f.color),
font_size=kwargs["font_size"],
show_junction_number=kwargs["show_junction_num"],
n_y_ticks=kwargs["n_y_ticks"],
show_y_label=not kwargs["hide_y_label"],
show_site_plot=kwargs["show_site"],
strand_choice=kwargs["site_strand"],
density_by_strand=kwargs["density_by_strand"],
only_customized_junction=kwargs["only_customized_junction"],
log_trans=kwargs["log"])
elif f.category != "atac":
p.add_density(f.path,
category=f.category,
label=f.label,
barcode_tag=kwargs["barcode_tag"],
umi_tag=kwargs["umi_tag"],
library=f.library,
color=f.color,
font_size=kwargs["font_size"],
show_junction_number=kwargs["show_junction_num"],
n_y_ticks=kwargs["n_y_ticks"],
show_y_label=not kwargs["hide_y_label"],
show_site_plot=kwargs["show_site"],
strand_choice=kwargs["site_strand"],
density_by_strand=kwargs["density_by_strand"],
log_trans=kwargs["log"])
elif key == "heatmap":
for f in process_file_list(kwargs[key], key):
if barcodes and f.name in barcodes.keys() and f.category in ["bam", "atac"]:
if f.label not in size_factors.keys() and f.category == "atac":
logger.info(f"Indexing {f.path}")
size_factors[f.label] = ATAC.index(f.path, barcodes[f.name])
for group in barcodes[f.name].keys():
p.add_heatmap(f.path,
category=f.category,
label=f"{f.label} - {group}" if group else f.label,
barcode=group,
barcode_groups=barcodes[f.name],
group=f"{f.group} - {group}" if f.group else f.group,
barcode_tag=kwargs["barcode_tag"],
size_factor=size_factors.get(f.label),
umi_tag=kwargs["umi_tag"],
library=f.library,
color=f.color,
show_y_label=not kwargs["hide_y_label"],
clustering=kwargs["clustering"],
clustering_method=kwargs["clustering_method"],
distance_metric=kwargs["distance_metric"],
font_size=kwargs["font_size"],
do_scale=kwargs["heatmap_scale"],
vmin=kwargs["heatmap_vmin"],
vmax=kwargs["heatmap_vmax"],
log_trans=kwargs["log"])
elif f.category != "atac":
p.add_heatmap(f.path,
category=f.category,
group=f.group,
label=f.label,
barcode_tag=kwargs["barcode_tag"],
umi_tag=kwargs["umi_tag"],
library=f.library,
color=f.color,
show_y_label=not kwargs["hide_y_label"],
clustering=kwargs["clustering"],
clustering_method=kwargs["clustering_method"],
distance_metric=kwargs["distance_metric"],
font_size=kwargs["font_size"],
show_row_names=kwargs["show_row_names"],
do_scale=kwargs["heatmap_scale"],
vmin=kwargs["heatmap_vmin"],
vmax=kwargs["heatmap_vmax"],
log_trans=kwargs["log"])
elif key == "line":
for f in process_file_list(kwargs[key], key):
if barcodes and f.name in barcodes.keys() and f.category == "bam":
for group in barcodes[f.name].keys():
if kwargs["group_by_cell"] and group:
label = group
elif group:
label = f"{f.label} - {group}"
else:
label = f.label
p.add_line(f.path,
category=f.category,
label=label,
barcode=group,
barcode_groups=barcodes,
group=f.group,
barcode_tag=kwargs["barcode_tag"],
umi_tag=kwargs["umi_tag"],
library=f.library,
color=sc_colors.get(group, f.color),
show_y_label=not kwargs["hide_y_label"],
font_size=kwargs["font_size"],
n_y_ticks=kwargs["n_y_ticks"],
show_legend=not kwargs["hide_legend"],
legend_position=kwargs["legend_position"],
legend_ncol=kwargs["legend_ncol"],
log_trans=kwargs["log"])
else:
p.add_line(f.path,
category=f.category,
group=f.group,
label=f.label,
barcode_tag=kwargs["barcode_tag"],
umi_tag=kwargs["umi_tag"],
library=f.library,
color=f.color,
show_y_label=not kwargs["hide_y_label"],
font_size=kwargs["font_size"],
n_y_ticks=kwargs["n_y_ticks"],
show_legend=not kwargs["hide_legend"],
legend_position=kwargs["legend_position"],
legend_ncol=kwargs["legend_ncol"],
log_trans=kwargs["log"])
elif key == "igv":
for f in process_file_list(kwargs[key], "igv"):
igv_features = {}
if kwargs["m6a"]:
igv_features.update({"m6a": kwargs["m6a"]})
if kwargs["polya"] and kwargs["rs"]:
igv_features.update({"real_strand": kwargs["rs"], "polya": kwargs["polya"]})
if len(igv_features) == 0:
igv_features = None
p.add_igv(f.path,
category=f.category,
label=f.label,
exon_color=f.color,
intron_color=f.color,
features=igv_features,
font_size=kwargs["font_size"],
n_y_ticks=kwargs["n_y_ticks"],
show_y_label=not kwargs["hide_y_label"],
                          deletion_ignore=kwargs["del_ratio_ignore"] == 1.0,
del_ratio_ignore=kwargs["del_ratio_ignore"],
exon_focus=f.exon_focus
)
elif key == "hic":
for f in process_file_list(kwargs[key], "hic"):
p.add_hic(
f.path,
category=f.category,
label=f.label,
trans=f.trans,
depth=f.depth,
color=f.color,
show_legend=not kwargs["hide_legend"],
show_y_label=not kwargs["hide_y_label"],
font_size=kwargs["font_size"],
n_y_ticks=kwargs["n_y_ticks"]
)
elif key == "motif":
motif_region = None
if kwargs["motif_region"]:
start, end = [int(x) for x in kwargs["motif_region"].split("-")]
motif_region = GenomicLoci(
region.chromosome,
max(start, region.start),
min(region.end, end),
region.strand)
p.add_motif(kwargs[key], motif_region=motif_region, width=kwargs["motif_width"])
elif key == "focus":
p.add_focus(kwargs[key])
elif key == "stroke":
p.add_stroke(kwargs[key])
elif key == "sites":
p.add_sites(kwargs[key])
elif key == "link":
p.add_links(kwargs[key])
if kwargs["group_by_cell"]:
p.merge_by_cell()
p.plot(
kwargs["output"],
width=kwargs["width"],
height=kwargs["height"],
dpi=kwargs["dpi"],
raster=kwargs["raster"],
intron_scale=kwargs["intron_scale"],
exon_scale=kwargs["exon_scale"],
reference_scale=kwargs["reference_scale"],
strock_scale=kwargs["stroke_scale"],
same_y=kwargs["same_y"],
remove_duplicate_umi=kwargs["remove_duplicate_umi"],
threshold=kwargs["threshold"],
sc_height_ratio={
"heatmap": kwargs["sc_heatmap_height_ratio"],
"density": kwargs["sc_density_height_ratio"]
},
distance_between_label_axis=kwargs["distance_ratio"],
included_junctions=included_junctions,
n_jobs=kwargs.get("process", 1)
)
logger.info("DONE")
if __name__ == '__main__':
main()
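
# Usage sketch (hypothetical input files; the region/reference/output options
# are defined earlier in this module and only consumed via kwargs above):
#   python cli.py --density density_list.tsv --threshold 10 \
#       --show-junction-num --log 10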
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/cli.py (pypi) ====
u"""
Created by [email protected] at 2019.12.06
"""
class Junction(object):
u"""
Created by ygidtu at 2018.12.19
This is used to collect information of single junction
And provide relative position comparison
"""
__slots__ = ["chromosome", "start", "end", "strand"]
def __init__(self, chromosome, start, end, strand: str = "+"):
u"""
init this class
:param chromosome: the chromosome name of the given junction
:param start: the start site of the given junction
:param end: the end site of the given junction
:param strand: the strand of the given junction.
"""
self.chromosome = chromosome
self.start = int(start)
self.end = int(end)
self.strand = strand
if self.end <= self.start:
raise ValueError(f"End site({start}) should bigger than start site({end})")
@property
def length(self):
u"""
:return: int, the length of this junction
"""
return self.end - self.start
@classmethod
def create_junction(cls, string):
u"""
create Junction from chr1:1-100:+
:param string: str, chr1:1-100:+ format or chr1:1-100 also work
:return:
"""
string = string.split(":")
chromosome = string[0]
start, end = string[1].split("-")
strand = "+"
if len(string) > 2:
strand = string[-1]
return cls(chromosome=chromosome, start=start, end=end, strand=strand)
def __hash__(self):
u"""
generate hash
:return:
"""
return hash((self.chromosome, self.start, self.end, self.strand))
def __str__(self):
u"""
convert junctions to string
:return:
"""
return f"{self.chromosome}:{self.start}-{self.end}:{self.strand}"
def __gt__(self, other):
u"""
greater than
compare two junction by length
:param other:
:return:
"""
return self.length > other.length
def __lt__(self, other):
u"""
less than
compare two junction by length
        :param other:
:return:
"""
return self.length < other.length
def __eq__(self, other):
u"""
same length
:param other:
:return:
"""
return self.length == other.length
def is_overlap(self, other):
u"""
whether any overlap with another Junction or GenomicLoci
:param other:
:return:
"""
if self.chromosome != other.chromosome:
return False
return self.start < other.end and self.end > other.start
def is_upstream(self, other):
u"""
whether this junction is upstream of other
:param other:
:return:
"""
assert isinstance(other, Junction), "Input should be Junction class"
if self.chromosome != other.chromosome:
return self.chromosome < other.chromosome
        return self.end < other.start
def is_downstream(self, other):
u"""
whether this junction is downstream of other
:param other:
:return:
"""
assert isinstance(other, Junction), "Input should be Junction class"
if self.chromosome != other.chromosome:
return self.chromosome > other.chromosome
return self.start > other.end
if __name__ == '__main__':
pass
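    # Illustrative usage sketch (not part of the original module; coordinates
    # are made up): the factory, overlap test and length-based ordering.
    j1 = Junction.create_junction("chr1:100-200:+")
    j2 = Junction.create_junction("chr1:150-400")
    print(j1.is_overlap(j2))   # True, the intervals intersect
    print(j2 > j1)             # True, comparison is by junction length
    print(str(j1))             # chr1:100-200:+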
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/base/Junction.py (pypi) ====
u"""
Convert aa position into genomic coordinate, Ran zhou.
"""
from itertools import islice
from typing import Optional
import numpy as np
class Coordinate(object):
u"""
A Coordinate object for genomic regions.
"""
def __init__(self, coordinate_list: list, strand: str = "*"):
u"""
Set genomic coordinates
        :param coordinate_list: a list of coordinate tuples
        :param strand: the strand of the given coordinate object
"""
self.strand = strand
self.se = self.__fmt_exons__(coordinate_list)
assert len(self.location_list) == len(set(self.location_list)), \
f"Overlapped regions were found in {self.se}"
@staticmethod
def __fmt_exons__(coordinate_list: list) -> list:
u"""
Format and sort exon list
        :param coordinate_list: a list of coordinate tuples, e.g. [('5','6'), ('1','4')]
        :return: a sorted list of int tuples, e.g. [(1, 4), (5, 6)]
"""
# sorting coordinate based on first of location.
formatted_coordinate_list = list(map(lambda x: tuple(map(int, x)), coordinate_list))
formatted_coordinate_list.sort(key=lambda x: x[0])
return formatted_coordinate_list
@staticmethod
def __get_s_or_e__(coordinate_list: list, index: int) -> list:
u"""
Get start or end site for each given coordinates
:param coordinate_list: a nested tuple of list, like [(1,3),(5,6)]
:param index: the index of tuple, 0 for the left site and 1 for the right end site.
:return: a list which contained the start or end sites.
"""
return list(map(lambda x: x[index], coordinate_list))
@property
def start(self):
return self.__get_s_or_e__(coordinate_list=self.se, index=0)
@property
def end(self):
return self.__get_s_or_e__(coordinate_list=self.se, index=1)
@property
def introns(self):
u"""
Set intronic regions for each coordinate object
:return: a nested tuple of list which contained intronic coordinates.
"""
if len(self.se) == 1:
return None
else:
introns_list = []
for left_exon, right_exon in self.__slide_window__(self.se, num_of_chunk=2):
introns_list.append(tuple(
[left_exon[1] + 1,
right_exon[0] - 1]
))
return introns_list
@staticmethod
def __slide_window__(nested_list: list, num_of_chunk: int):
u"""
A sliding window to slice the given list
:param nested_list: a nested tuple of list, like [(1,3),(5,6)]
:param num_of_chunk: num of element for each chunk.
:return:
"""
nested_list = iter(nested_list)
chunked_list = list(islice(nested_list, num_of_chunk))
if len(chunked_list) == num_of_chunk:
yield chunked_list
for elem in nested_list:
result = chunked_list[1:] + list((elem,))
yield result
@classmethod
def __flatten__(cls, nested_list: list):
u"""
Flatten the nested list
:param nested_list: a nested tuple of list, like [(1,3),(5,6)]
:return:
"""
for sub_list in nested_list:
if hasattr(sub_list, "__iter__") and not isinstance(sub_list, str):
for sub_el in cls.__flatten__(sub_list):
yield sub_el
else:
yield sub_list
@property
def location_list(self) -> np.ndarray:
u"""
Get a list which contained all position.
        For example, for an exon list `[(10, 15), (20, 25)]` as input,
        [10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25] will be returned
:return: a list which contained all position
"""
# Add 1 offset because of 0-based coordinate
position_list = list(self.__flatten__(list(map(
lambda x: range(x[0], x[1] + 1), self.se
))))
if self.strand == '-':
return np.array(position_list[::-1])
return np.array(position_list)
@property
def pep_index(self) -> np.ndarray:
u"""
Relative position of pep coordinates
:return:
"""
return np.array(list(map(lambda x: int(x / 3), range(len(self.location_list)))))
@property
def cds_index(self) -> np.ndarray:
u"""
Relative position of cds coordinates
:return:
"""
return np.array(list(range(len(self.location_list))))
@staticmethod
def __group_consecutive_value__(location_list: np.ndarray, strand: str) -> list:
u"""
group the consecutive value into a list
:param location_list: a list of location site
:param strand: the strand of the current list to group
:return:
"""
offset = -1 if strand == "-" else 1
group_ids = np.concatenate(([0], (np.diff(location_list) != offset).cumsum()))
grouped_truncated_location_array = \
np.split(
location_list,
np.unique(group_ids, return_counts=True)[1].cumsum().tolist()
)
return grouped_truncated_location_array
@classmethod
def init_from_location_list(cls, truncated_location_array: np.ndarray, strand: str):
u"""
init class based on the list of location
:param truncated_location_array: truncated location array
:param strand: the strand of the current list
:return:
"""
__coordinate_list = []
for sub_array in cls.__group_consecutive_value__(truncated_location_array, strand):
if len(sub_array) == 0:
continue
__coordinate_list.append(
tuple(
[
min(sub_array),
max(sub_array)
]
)
)
return cls(__coordinate_list, strand)
class CoordinateMapper(Coordinate):
u"""
Convert positions between CDS and protein coordinates.
TODO: Add cds to pep coordinate?
"""
def __init__(self, coordinates_list, strand: str):
u"""
Set genomic coordinates to be used for mapping
:param coordinates_list: a nested tuple of list
        :param strand: the strand of the given coordinate object
"""
super().__init__(coordinate_list=coordinates_list, strand=strand)
def pep_to_cds(self, pep_start: int, pep_end: Optional[int] = None):
u"""
Convert pep position into genomic position
:param pep_start: the start position of pep
:param pep_end: the end position of pep, if None, the start site is equal to end site
:return:
"""
# change 1-based pep coordinate into 0-based coordinate
pep_start = pep_start - 1
pep_end = pep_start if pep_end is None else pep_end - 1
start_ind = np.where(self.pep_index == pep_start)[0]
end_ind = np.where(self.pep_index == pep_end)[0]
cds_left_index, cds_right_index = start_ind[0], end_ind[-1] + 1
return self.init_from_location_list(
self.location_list[cds_left_index:cds_right_index],
self.strand)
if __name__ == '__main__':
pass
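    # Minimal sketch (assumed coordinates, not from the original module): map
    # the first two amino acids of a minus-strand CDS built from two exons
    # back to genomic space.
    mapper = CoordinateMapper([(100, 105), (110, 115)], strand="-")
    sub = mapper.pep_to_cds(1, 2)
    print(sub.se, sub.strand)  # [(110, 115)] -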
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/base/CoordinateMap.py (pypi) ====
u"""
Created by [email protected] at 2019.12.06
Changelog:
1. remove attributes
"""
from typing import List
from sashimi.base.GenomicLoci import GenomicLoci
class Transcript(GenomicLoci):
u"""
Created by ygidtu at 2018.12.21
A class inherit from GenomicLoci, to collect transcript information
"""
__slots__ = [
"gene",
"gene_id",
"transcript",
"transcript_id",
"exons",
"category",
"domain_category",
"domain_type",
"domain_description",
"plot_intron"
]
def __init__(
self,
chromosome: str,
start: int,
end: int,
strand: str,
exons: list,
gene: str = "",
gene_id: str = "",
transcript: str = "",
transcript_id: str = "",
category: str = "exon",
domain_category: str = "",
domain_type: str = "",
domain_description: str = ""
):
u"""
:param chromosome:
:param start:
:param end:
:param strand:
:param exons: A list of pysam.GTFProxy if category was exon, A nested tuple of list if category was protein
:param gene: gene name when category is exon,such as "SAMD11"; domain's description when category is domain such as "Disordered"
:param gene_id: gene id, such as "ENSG00000187634"
:param transcript: transcript name, such as "SAMD11-011"
:param transcript_id: transcript id, such as "ENST00000420190"
:param category: exon or protein or interval
:param domain_category: category of domain
:param domain_description: description of domain
:param domain_type: if category is protein, the type information of the given domain
"""
super().__init__(
chromosome=chromosome,
start=start,
end=end,
strand=strand
)
self.transcript = transcript
self.transcript_id = transcript_id
self.gene = gene
self.gene_id = gene_id
self.exons = sorted(exons)
self.category = category
self.domain_category = domain_category
self.domain_type = domain_type
self.domain_description = domain_description
@property
def exon_list(self):
exon_nested_lst = []
for i in self.exons:
            exon_nested_lst.append([i.start + 1, i.end])
return exon_nested_lst
def __str__(self):
exons_str = []
for i in self.exons:
if isinstance(i, list):
"""
2022.07.05
Domain setting
"""
exons_str.append("|".join(map(lambda x: f"{x.start}-{x.end}", i)))
else:
exons_str.append("{}-{}".format(i.start, i.end))
return "{}:{}-{}:{} {} {}".format(
self.chromosome,
self.start,
self.end,
self.strand,
self.transcript,
"|".join(exons_str)
)
def __len__(self):
return sum(map(lambda x: x[1] - x[0] + 1, self.exon_list))
def __hash__(self):
exons = sorted([str(x.__hash__()) for x in self.exons])
return hash((self.chromosome, self.start, self.end, self.strand, " ".join(exons)))
def ids(self) -> List[str]:
return [self.transcript, self.transcript_id, self.gene, self.gene_id]
if __name__ == "__main__":
pass
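    # Minimal sketch (stand-in exon objects instead of pysam.GTFProxy, which
    # only need .start/.end here):
    from collections import namedtuple
    Exon = namedtuple("Exon", ["start", "end"])
    t = Transcript("chr1", 100, 500, "+",
                   exons=[Exon(100, 200), Exon(300, 500)],
                   gene="DEMO", transcript_id="ENST_DEMO")
    print(t.exon_list)  # 1-based starts: [[101, 200], [301, 500]]
    print(len(t))       # 300, the summed exonic length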
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/base/Transcript.py (pypi) ====
u"""
Created by [email protected] at 2019.01.04
This script contains all the basic data types used by this suite of scripts
For better organization
Changelog:
1. add relative to convert sites to relative coord
"""
class GenomicLoci(object):
u"""
Created by ygidtu at 2018.12.19
A base class to handle the position relationships
"""
__slots__ = [
"chromosome",
"start",
"end",
"strand",
"gtf_line",
"name"
]
def __init__(self, chromosome, start, end, strand, name="", gtf_line=None):
u"""
init this class
:param chromosome: str
:param start: int
:param end: int
        :param strand: strand information, one of +, - or * (. is converted to *)
        :param name: name of given feature
        :param gtf_line: the original gtf line, if any
        """
self.chromosome = chromosome
self.start = int(start)
self.end = int(end)
self.gtf_line = gtf_line
self.name = name
if self.end < self.start:
raise ValueError(f"End site should bigger than start site, not {self.start} -> {self.end}")
if strand == ".":
strand = "*"
if strand not in ("+", "-", "*"):
raise ValueError(f"strand should be + or -, not {strand}")
self.strand = strand
def __str__(self):
u"""
convert this to string
:return:
"""
return f"{self.chromosome}:{self.start}-{self.end}:{self.strand}"
def __gt__(self, other):
u"""
        whether self is downstream of other
Note:
make sure the wider range is upstream of narrower
due to the sort of gtf file, therefore the transcript will be ahead of exons
:param other:
:return:
"""
if self.chromosome != other.chromosome:
return self.chromosome > other.chromosome
if self.start != other.start:
return self.start > other.start
return self.end < other.end
def __lt__(self, other):
u"""
        whether self is upstream of other
Note:
make sure the wider range is downstream of narrower
due to the sort of gtf file, therefore the transcript will be ahead of exons
:param other:
:return:
"""
if self.chromosome != other.chromosome:
return self.chromosome < other.chromosome
if self.start != other.start:
return self.start < other.start
return self.end > other.end
def __eq__(self, other):
u"""
if two objects are the same
:param other:
:return:
"""
return hash(self) == hash(other)
def __add__(self, other):
u"""
merge two sites into one
:param other:
:return:
"""
return GenomicLoci(
chromosome=self.chromosome,
start=min(self.start, other.start),
end=max(self.end, other.end),
strand=self.strand
)
def __hash__(self):
u"""
generate hash
:return:
"""
return hash((self.chromosome, self.start, self.end))
def __len__(self) -> int:
return self.end - self.start + 1
def is_overlap(self, other):
u"""
whether two loci have any overlaps
:param other: another GenomicLoci and it's children class
:return: Boolean
"""
return self.chromosome == other.chromosome and self.start <= other.end and self.end >= other.start
@classmethod
def create_loci(cls, string):
u"""
Create loci from String
:param string: chr1:1-100:+
:return:
"""
temp = string.split(":")
if len(temp) == 3:
chromosome, sites, strand = temp
elif len(temp) == 2:
chromosome, sites = temp
strand = "*"
else:
raise ValueError("Failed to decode genomic region: %s" % string)
start, end = sites.split("-")
return cls(chromosome, start, end, strand)
def relative(self, site: int) -> int:
return site - self.start
if __name__ == "__main__":
pass
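    # Minimal sketch (made-up coordinates): parsing, length, relative
    # positions and merging of two loci.
    loci = GenomicLoci.create_loci("chr1:1-100:+")
    print(loci, len(loci))       # chr1:1-100:+ 100
    print(loci.relative(50))     # 49
    merged = loci + GenomicLoci("chr1", 80, 150, "+")
    print(merged)                # chr1:1-150:+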
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/base/GenomicLoci.py (pypi) ====
u"""
Created by [email protected] at 2019.12.06
Changelog:
1. move several attributes and functions to corresponding file objects, turn this into pure data class
2. add transform to log2, log10 or zscore transform the data while plotting
"""
from typing import Dict, Optional
import numpy as np
from scipy.stats import zscore
from sashimi.base.Junction import Junction
class ReadDepth(object):
u"""
Migrated from SplicePlot ReadDepth class
add a parent class to handle all the position comparison
"""
def __init__(self,
wiggle: np.array,
junctions_dict: Optional[Dict[Junction, int]] = None,
site_plus: Optional[np.array] = None,
site_minus: Optional[np.array] = None,
minus: Optional[np.array] = None,
junction_dict_plus: Optional[np.array] = None,
junction_dict_minus: Optional[np.array] = None,
strand_aware: bool = False):
u"""
init this class
:param wiggle: a numpy.ndarray object represented the whole read coverage,
should be summation of plus and minus or plus
:param junctions_dict: a dict represented the coordinate of each intron as well as frequency
:param site_plus: a numpy.ndarray object represented the forward site coverage
:param site_minus: a numpy.ndarray object represented the reverse site coverage
:param minus: a numpy.ndarray object represented the reverse strand read coverage
:param strand_aware: strand specific depth
:param junction_dict_plus: these splice junction from plus strand
:param junction_dict_minus: these splice junction from minus strand
"""
self.plus = wiggle
self.junctions_dict = junctions_dict
self.strand_aware = strand_aware
self.minus = abs(minus) if minus is not None else minus
self.junction_dict_plus = junction_dict_plus
self.junction_dict_minus = junction_dict_minus
self.site_plus = site_plus
self.site_minus = site_minus * -1 if site_minus is not None else site_minus
@property
def wiggle(self) -> np.array:
if (self.plus is None or not self.plus.any()) and self.minus is not None:
return self.minus
if self.plus is not None and self.minus is not None:
return self.plus + self.minus
return self.plus
@property
def max(self) -> float:
return max(self.wiggle, default=0)
def __add__(self, other):
"""
__add__ allows two ReadDepth objects to be added together using the + symbol
Both self and other must have the same low and high attributes
return value:
A new ReadDepth object containing the sum of the two original ReadDepth objects
"""
if self.wiggle is not None and other.wiggle is not None:
if len(self.wiggle) == len(other.wiggle):
junctions = self.junctions_dict if self.junctions_dict else {}
if other.junctions_dict:
for i, j in other.junctions_dict.items():
if i in junctions.keys():
junctions[i] += j
else:
junctions[i] = j
minus = None
if self.minus is not None and other.minus is not None:
minus = self.minus + other.minus
elif self.minus is None and other.minus is not None:
minus = other.minus
elif self.minus is not None and other.minus is None:
minus = self.minus
return ReadDepth(
self.plus + other.plus,
junctions_dict=junctions,
minus=minus
)
elif self.wiggle is None:
return other
else:
return self
def curr_height(self, pos: int) -> float:
if self.minus is None:
return self.plus[pos]
return self.plus[pos] + self.minus[pos]
def curr_max(self, pos: int) -> float:
return self.plus[pos]
def curr_min(self, pos: int) -> float:
return self.minus[pos] if self.minus is not None else 0
def add_customized_junctions(self, other):
u"""
Add customized junctions to plot
:param other:
:return:
"""
new_junctions_dict = {}
for key, value in self.junctions_dict.items():
if key in other.junctions_dict:
new_junctions_dict[key] = value + other.junctions_dict[key]
else:
new_junctions_dict[key] = value
for key, value in list(other.junctions_dict.items()):
if key not in self.junctions_dict:
new_junctions_dict[key] = value
self.junctions_dict = new_junctions_dict
return new_junctions_dict
def transform(self, log_trans: str):
funcs = {"10": np.log10, "2": np.log2, "zscore": zscore, "e": np.log}
if log_trans in funcs.keys():
if self.plus is not None:
self.plus = funcs[log_trans](self.plus + 1)
if self.minus is not None:
self.minus = funcs[log_trans](self.minus + 1)
def normalize(self, size_factor: float):
self.plus = np.divide(self.plus, size_factor) # * 100
if self.minus is not None:
self.minus = np.divide(self.minus, size_factor)
if __name__ == '__main__':
pass
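    # Minimal sketch (synthetic coverage): combining two tracks and applying
    # the log10 transform used when `--log 10` is passed on the CLI.
    a = ReadDepth(np.array([1.0, 2.0, 3.0]), junctions_dict={})
    b = ReadDepth(np.array([4.0, 5.0, 6.0]), junctions_dict={})
    merged = a + b
    merged.transform("10")       # plus strand becomes log10(x + 1)
    print(merged.wiggle, merged.max)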
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/base/ReadDepth.py (pypi) ====
u"""
Generate object for processing HiC matrix information
pre-process code was re-wrote based
https://github.com/deeptools/pyGenomeTracks/blob/c42e74e725d22269c33718d9f5df11e0c45c7378/pygenometracks/tracks/HiCMatrixTrack.py#L13
"""
import itertools
from typing import Optional
import numpy as np
from loguru import logger
from scipy import sparse
from sashimi.base.GenomicLoci import GenomicLoci
from sashimi.base.Readder import Reader
class HiCTrack:
def __init__(self,
path: str,
label: str = "",
depth: int = 30000,
trans: Optional[str] = None,
matrix: Optional[np.ndarray] = None,
x_coord: Optional[np.ndarray] = None,
y_coord: Optional[np.ndarray] = None,
region: Optional[GenomicLoci] = None,
is_single_cell: bool = False
):
self.path = path
self.matrix = matrix
self.x = x_coord
self.y = y_coord
self.depth = depth
self.trans = trans
self.label = label
self.region = region
self.is_single_cell = is_single_cell
@classmethod
def create(cls,
path: str,
label: str,
depth: int,
               trans: Optional[str] = None
):
"""
Create a HiCTrack object for fetching interaction matrix
:param path: the HiC file which could be one of [h5, cool / mcool / scool, hicpro, homer]
:param label: the label of the given HiC data
:param depth: the depth of the given HiC data, a bigger depth means big y-axis
:param trans: log1p, log2 or log10 transform
:return:
"""
return cls(
path=path,
label=label,
depth=depth,
trans=trans
)
def load(self,
region: GenomicLoci,
**kwargs
):
"""
Load data from the given region
:param region: the GenomicLoci object of given region
:return:
"""
hic = Reader.read_hic(path=self.path, region=region)
chromosome_id = region.chromosome
region_start = region.start
region_end = region.end
try:
chr_start_id, chr_end_id = hic.getChrBinRange(chromosome_id)
        except Exception:
            logger.info("chromosome naming may not match between the region and the HiC file")
if chromosome_id.startswith("chr"):
chromosome_id = region.chromosome.replace("chr", "") if region.chromosome != "chrM" else "MT"
else:
chromosome_id = "chr" + region.chromosome if region.chromosome != "MT" else "chrM"
chr_start_id, chr_end_id = hic.getChrBinRange(chromosome_id)
chr_start = hic.cut_intervals[chr_start_id][1]
chr_end = hic.cut_intervals[chr_end_id - 1][2]
start_bp = max(chr_start, region_start - self.depth)
end_bp = min(chr_end, region_end + self.depth)
idx = [idx for idx, x in enumerate(hic.cut_intervals)
if x[0] == chromosome_id and x[1] >= start_bp and x[2] <= end_bp]
start_pos = [x[1] for i, x in enumerate(hic.cut_intervals) if i in idx]
start_pos = tuple(list(start_pos) + [hic.cut_intervals[idx[-1]][2]])
matrix = hic.matrix[idx, :][:, idx]
region_len = region_end - region_start
current_depth = min(self.depth, int(region_len * 1.25))
depth_in_bins = max(1, int(1.5 * region_len / hic.getBinSize()))
if current_depth < self.depth:
logger.warning(f"The depth was set to {self.depth} which is more than 125% "
f"of the region plotted. The depth will be set "
f"to {current_depth}.\n")
# remove from matrix all data points that are not visible.
matrix = matrix - sparse.triu(matrix, k=depth_in_bins, format='csr')
matrix = np.asarray(matrix.todense().astype(float))
n = matrix.shape[0]
t = np.array([[1, 0.5], [-1, 0.5]])
matrix_tmp = np.dot(np.array([(i[1], i[0])
for i in itertools.product(start_pos[::-1],
start_pos)]), t)
self.x = matrix_tmp[:, 1].reshape(n + 1, n + 1)
self.y = matrix_tmp[:, 0].reshape(n + 1, n + 1)
self.matrix = HiCTrack.mat_trans(matrix, method=self.trans)
self.region = region
@staticmethod
def mat_trans(matrix: np.ndarray, method: Optional[str] = None):
if not method:
return matrix
assert method in {"log1p", "log2", "log10"}, f"couldn't recognize current transform method {method}"
if method == "log1p":
return np.log1p(matrix)
elif method == "log2":
return np.log2(matrix + 1)
elif method == "log10":
return np.log10(matrix + 1)
if __name__ == '__main__':
pass
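    # Minimal sketch (synthetic matrix, no HiC file needed): the static
    # transform helper applied with and without a method.
    m = np.array([[0.0, 1.0], [1.0, 3.0]])
    print(HiCTrack.mat_trans(m, method="log1p"))
    print(HiCTrack.mat_trans(m))  # no method -> returned unchanged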
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/file/HiCMatrixTrack.py (pypi) ====
u"""
This file contains the object to handle bam file related issues.
changelog:
1. add library parameter for determining of read strand at 2022.4.28.
"""
import os
from typing import Dict, Optional, Set
import numpy as np
from loguru import logger
from sashimi.base.GenomicLoci import GenomicLoci
from sashimi.base.ReadDepth import ReadDepth
from sashimi.base.Readder import Reader
from sashimi.file.File import SingleCell
class ATAC(SingleCell):
def __init__(self,
path: str, barcode_groups: Dict[str, Set[str]], barcode: str, size_factor,
label: str = "", title: str = ""):
u"""
init this object
:param label: the left axis label
:param title: the default title to show in the upper-right of density plot
:param barcode_groups:
:param barcode: key of barcode_groups
:param size_factor
"""
super().__init__(path, barcode_groups[barcode])
self.title = title
self.label = label if label else os.path.basename(path).replace(".bam", "")
self.size_factor = size_factor
self.barcode_groups = barcode_groups
self.barcode = barcode
@classmethod
def index(cls, path: str, barcodes: Optional[Dict[str, Set[str]]] = None):
reverse_barcode_groups = {} # dict, key => barcode, value => group
for key, vals in barcodes.items():
for val in vals:
reverse_barcode_groups[val.strip()] = key.strip()
size_factors, sizes = {}, {}
for values in Reader.read_depth(path=path):
values = values.split()
count = int(values[-1])
barcode = values[-2]
if barcode not in reverse_barcode_groups.keys():
continue
key = reverse_barcode_groups[barcode]
sizes[key] = sizes.get(key, 0) + 1
size_factors[key] = size_factors.get(key, 0) + count
for key, val in size_factors.items():
size_factors[key] = size_factors[key] * sizes[key]
del sizes
median_size_factor = np.median(np.array(list(size_factors.values())))
return {x: y / median_size_factor for x, y in size_factors.items()}
@classmethod
def create(cls,
path: str,
label: str = "",
title: str = "",
barcode_groups: Optional[Dict[str, Set[str]]] = None,
barcode: Optional[str] = None,
size_factors=None):
u"""
:param path: the path to bam file
:param label: the left axis label
:param title: the default title to show in the upper-right of density plot
:param barcode_groups:
:param barcode: key of barcode_groups:
:param size_factors
:return:
"""
return cls(
path=path,
label=label,
title=title,
barcode=barcode,
barcode_groups=barcode_groups,
size_factor=size_factors
)
def __hash__(self):
return hash(self.label)
def __str__(self) -> str:
temp = []
for x in [self.title, self.label, self.path]:
if x is None or x == "":
x = "None"
temp.append(str(x))
return "\t".join(temp)
def load(self,
region: GenomicLoci,
threshold: int = 0,
required_strand: Optional[str] = None,
log_trans: Optional[str] = None,
**kwargs):
"""
determine_depth determines the coverage at each base between start_coord and end_coord, inclusive.
bam_file_path is the path to the bam file used to \
determine the depth and junctions on chromosome between start_coord and end_coord
return values:
depth_vector,
which is a Numpy array which contains the coverage at each base position between start_coord and end_coord
spanned_junctions, which is a dictionary containing the junctions supported by reads.
The keys in spanned_junctions are the
names of the junctions, with the format chromosome:lowerBasePosition-higherBasePosition
:param region: GenomicLoci object including the region for calculating coverage
:param threshold: minimums counts of the given splice junction for visualization
        :param required_strand: None -> all reads, else reads on specific strand
        :param log_trans: should be one of "10" (np.log10) or "2" (np.log2)
"""
self.region = region
self.log_trans = log_trans
depth_vector = np.zeros(len(region), dtype=int)
try:
for _, start, end, barcode, count in Reader.read_depth(path=self.path, region=region):
# filter reads by 10x barcodes
start, end, count = int(start), int(end), int(count)
if not self.empty_barcode():
if not self.has_barcode(barcode):
continue
depth_vector[(start - region.start)] += count
depth_vector[(end - region.start)] += count
except IOError as err:
logger.error('There is no .bam file at {0}'.format(self.path))
logger.error(err)
except ValueError as err:
logger.error(self.path)
logger.error(err)
self.data = ReadDepth(depth_vector)
self.data.normalize(self.size_factor[self.barcode])
return self
if __name__ == '__main__':
pass
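    # Minimal sketch (toy counts, no file I/O): the median normalization at
    # the end of ATAC.index, detached from Reader.read_depth.
    raw = {"groupA": 1200.0, "groupB": 3000.0, "groupC": 600.0}
    median = np.median(np.array(list(raw.values())))
    print({k: round(v / median, 3) for k, v in raw.items()})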
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/file/ATAC.py (pypi) ====
u"""
This file contains the object to handle bam file related issues.
changelog:
1. add library parameter for determining of read strand at 2022.4.28.
"""
import gzip
import os
from typing import Optional, Set
import numpy as np
import pysam
from loguru import logger
from sashimi.base.GenomicLoci import GenomicLoci
from sashimi.base.Junction import Junction
from sashimi.base.ReadDepth import ReadDepth
from sashimi.base.Readder import Reader
from sashimi.file.File import SingleCell
class Bam(SingleCell):
def __init__(self,
path: str, label: str = "",
title: str = "", barcodes: Optional[Set[str]] = None,
barcode_tag: str = "CB", umi_tag: str = "UB",
library: str = "fru", density_by_strand: bool = False):
u"""
init this object
:param label: the left axis label
:param title: the default title to show in the upper-right of density plot
:param barcodes: the path to barcodes,
default: ../filtered_feature_bc_matrix/barcodes.tsv.gz of bam file according to 10X Genomics
:param barcode_tag: the cell barcode tag, default is CB according to 10X Genomics
:param umi_tag: the UMI barcode tag, default is UB according to 10X Genomics
:param library: library for determining of read strand.
:param density_by_strand: whether to draw density plot in strand-specific manner.
"""
super().__init__(path, barcodes, barcode_tag, umi_tag)
self.title = title
self.label = label if label else os.path.basename(path).replace(".bam", "")
self.library = library
self.density_by_strand = density_by_strand
@classmethod
def create(cls,
path: str,
label: str = "",
title: str = "",
barcodes: Optional[Set[str]] = None,
barcode_tag: str = "CB",
umi_tag: str = "UB",
library: str = "fru",
density_by_strand: bool = False
):
u"""
:param path: the path to bam file
:param label: the left axis label
:param title: the default title to show in the upper-right of density plot
:param barcodes: the path to barcodes,
default: ../filtered_feature_bc_matrix/barcodes.tsv.gz of bam file according to 10X Genomics
:param barcode_tag: the cell barcode tag, default is CB according to 10X Genomics
:param umi_tag: the UMI barcode tag, default is UB according to 10X Genomics
:param library: library for determining of read strand.
:return:
"""
if not os.path.exists(path + ".bai"):
pysam.index(path)
barcode = barcodes
path = os.path.abspath(path)
if not barcodes:
barcode = set()
barcodes = os.path.join(os.path.dirname(path), "filtered_feature_bc_matrix/barcodes.tsv.gz")
if os.path.exists(barcodes):
with gzip.open(barcodes, "rt") as r:
for line in r:
barcode.add(line.strip())
return cls(
path=path,
label=label,
title=title,
barcodes=barcode,
barcode_tag=barcode_tag,
umi_tag=umi_tag,
library=library,
density_by_strand=density_by_strand
)
def __hash__(self):
return hash(self.label)
def __str__(self) -> str:
temp = []
for x in [self.title, self.label, self.path]:
if x is None or x == "":
x = "None"
temp.append(str(x))
return "\t".join(temp)
def to_csv(self) -> str:
temp = []
for x in [self.title, self.label, self.path]:
if x is None or x == "":
x = "None"
if isinstance(x, list):
x = ";".join(x)
temp.append(str(x))
return ",".join(temp)
def load(self,
region: GenomicLoci,
threshold: int = 0,
reads1: Optional[bool] = None,
required_strand: Optional[str] = None,
**kwargs
):
"""
determine_depth determines the coverage at each base between start_coord and end_coord, inclusive.
bam_file_path is the path to the bam file used to \
determine the depth and junctions on chromosome between start_coord and end_coord
return values:
depth_vector,
which is a Numpy array which contains the coverage at each base position between start_coord and end_coord
spanned_junctions, which is a dictionary containing the junctions supported by reads.
The keys in spanned_junctions are the
names of the junctions, with the format chromosome:lowerBasePosition-higherBasePosition
:param region: GenomicLoci object including the region for calculating coverage
:param threshold: minimums counts of the given splice junction for visualization
:param reads1: None -> all reads, True -> only R1 kept; False -> only R2 kept
:param required_strand: None -> all reads, else reads on specific strand
"""
self.region = region
filtered_junctions = {}
spanned_junctions = kwargs.get("junctions", {})
included_junctions = kwargs.get("included_junctions", {})
remove_duplicate_umi = kwargs.get("remove_duplicate_umi", False)
spanned_junctions_plus = dict()
spanned_junctions_minus = dict()
plus, minus = np.zeros(len(region), dtype="f"), np.zeros(len(region), dtype="f")
site_plus, site_minus = np.zeros(len(region), dtype="f"), np.zeros(len(region), dtype="f")
umis = {}
try:
for read, strand in Reader.read_bam(path=self.path, region=region, library=self.library):
# make sure that the read can be used
cigar_string = read.cigartuples
# each read must have a cigar string
if cigar_string is None:
continue
# select R1 or R2
if reads1 is True and not read.is_read1:
continue
if reads1 is False and not read.is_read2:
continue
# filter reads by 10x barcodes
# @20220924, add `not` before has_barcode and skip these reads without umi tag.
if self.barcodes:
if not read.has_tag(self.barcode_tag) or not self.has_barcode(read.get_tag(self.barcode_tag)):
continue
if remove_duplicate_umi:
barcode = read.get_tag(self.barcode_tag)
if barcode not in umis.keys():
umis[barcode] = {}
# filter reads with duplicate umi by barcode
if read.has_tag(self.umi_tag):
umi = read.get_tag(self.umi_tag)
if umi in umis[barcode].keys() and umis[barcode][umi] != hash(read.query_name):
continue
if len(umis[barcode]) == 0:
umis[barcode][umi] = hash(read.query_name)
else:
continue
start = read.reference_start
if required_strand and strand != required_strand:
continue
"""
M BAM_CMATCH 0
I BAM_CINS 1
D BAM_CDEL 2
N BAM_CREF_SKIP 3
S BAM_CSOFT_CLIP 4
H BAM_CHARD_CLIP 5
P BAM_CPAD 6
= BAM_CEQUAL 7
X BAM_CDIFF 8
B BAM_CBACK 9
"""
for cigar, length in cigar_string:
cur_start = start + 1
cur_end = start + length + 1
if cigar == 0: # M
for i in range(length):
if region.start <= start + i + 1 <= region.end:
try:
if strand == "+":
plus[start + i + 1 - region.start] += 1
elif strand == "-":
minus[start + i + 1 - region.start] += 1
else:
pass
except IndexError as err:
logger.info(region)
logger.info(cigar_string)
logger.info(start, i)
exit(err)
# remove the deletion.
if cigar not in (1, 4, 5): # I, S, H
start += length
if cigar == 3 and not kwargs.get("only_customized_junction"): # N
try:
junction_name = Junction(region.chromosome, cur_start, cur_end, strand)
if junction_name not in spanned_junctions:
spanned_junctions[junction_name] = 0
spanned_junctions[junction_name] = spanned_junctions[junction_name] + 1
except ValueError as err:
logger.warning(err)
continue
start = read.reference_start + 1 if read.reference_start + 1 > region.start else region.start
end = read.reference_end + 1 if read.reference_end + 1 < region.end else region.end
if strand == "+" and 0 <= start - region.start < len(plus):
site_plus[end - region.start] += 1
elif strand == "-" and 0 <= end - region.start < len(minus):
site_minus[start - region.start] += 1
for k, v in spanned_junctions.items():
if included_junctions and str(k) not in included_junctions:
continue
if v >= threshold:
filtered_junctions[k] = v
if k.strand == "-":
if k not in spanned_junctions_plus:
spanned_junctions_plus[k] = -1
else:
spanned_junctions_plus[k] += -1
elif k.strand == "+":
if k not in spanned_junctions_minus:
spanned_junctions_minus[k] = -1
else:
spanned_junctions_minus[k] += -1
except IOError as err:
logger.error('There is no .bam file at {0}'.format(self.path))
logger.error(err)
except ValueError as err:
logger.error(self.path)
logger.error(err)
self.data = ReadDepth(
plus if self.density_by_strand else plus + minus,
junctions_dict=filtered_junctions,
site_plus=site_plus,
site_minus=site_minus,
minus=minus if self.density_by_strand else None,
junction_dict_plus=spanned_junctions_plus,
junction_dict_minus=spanned_junctions_minus,
            strand_aware=self.library != "fru")
return self
if __name__ == '__main__':
pass
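    # Usage sketch (hypothetical files): `demo.bam` must be a coordinate
    # sorted bam; Bam.create indexes it on the fly if no .bai is present.
    # bam = Bam.create("demo.bam", label="demo", library="fru")
    # bam.load(GenomicLoci("chr1", 1000, 2000, "+"), threshold=5)
    # print(bam.data.wiggle.max(), len(bam.data.junctions_dict))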
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/file/Bam.py (pypi) ====
u"""
The parent object of input files
"""
from copy import deepcopy
from typing import Optional, Set, List, Dict
class File(object):
def __init__(self, path: str):
self.path = path
self.data = None
self.label = ""
self.region = None
self.log_trans = "0"
self.title = ""
self.is_single_cell = False
@property
def chrom(self) -> str:
if self.region:
return self.region.chrom
return ""
@property
def start(self) -> int:
if self.region:
return self.region.start
return 0
@property
def end(self) -> int:
if self.region:
return self.region.end
return 0
def load(self, *args, **kwargs):
return None
def len(self, scale=1) -> int:
return len(self.data) / scale if self.data else 0
def __hash__(self) -> int:
return hash((self.path, self.label, self.title))
def __eq__(self, other):
return self.path == other.path and self.label == other.label and self.title == other.title
def transform(self):
if self.data is not None:
self.data.transform(self.log_trans)
def __set_barcodes__(barcodes: Optional[List[str]]) -> Dict:
u"""
separate barcodes by its first character to reduce set size
:params barcodes: list or set of barcodes
"""
res = {}
if barcodes is not None:
for b in barcodes:
if b:
f = b[:min(3, len(b))]
if f not in res.keys():
res[f] = set()
res[f].add(b)
return res
class SingleCell(File):
def __init__(self, path: str, barcodes: Optional[Set[str]] = None, barcode_tag: str = "CB", umi_tag: str = "UB"):
super().__init__(path)
self.barcodes = __set_barcodes__(barcodes)
self.barcode_tag = barcode_tag
self.umi_tag = umi_tag
self.is_single_cell = not self.empty_barcode()
def has_barcode(self, barcode: str) -> bool:
u"""
check whether contains barcodes
:param barcode: barcode string
"""
if barcode:
f = barcode[:min(3, len(barcode))]
temp = self.barcodes.get(f, set())
return barcode in temp
return False
def empty_barcode(self) -> bool:
u"""
check whether this bam do not contain any barcodes
"""
count = 0
for i in self.barcodes.values():
count += len(i)
if count > 0:
return False
return True
def __add__(self, other):
self.path += other.path
for i, j in other.barcodes.items():
if i not in self.barcodes.keys():
self.barcodes[i] = j
else:
self.barcodes[i] |= j
return self
def __eq__(self, other) -> bool:
return self.__hash__() == other.__hash__()
def copy(self):
return deepcopy(self)
if __name__ == "__main__":
pass
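    # Minimal sketch (made-up barcodes and path, no I/O happens on init):
    # the prefix-bucketed barcode lookup used by the single cell files.
    sc = SingleCell("demo.bam", barcodes={"AAACCTG", "AAACGGG", "TTTGGTT"})
    print(sc.has_barcode("AAACCTG"))  # True
    print(sc.has_barcode("CCCCCCC"))  # False
    print(sc.is_single_cell)          # True, barcodes were provided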
# ==== /sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/file/File.py (pypi) ====
# all the drawing functions in this file assume that
# the coordinates of the svg file have been transformed to cartesian coordinates
class RGB(object):
def __init__(self, red=255, green=255, blue=255):
if red > 255 or red < 0 or green > 255 or green < 0 or blue > 255 or blue < 0:
raise Exception("Invalid color")
else:
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
@classmethod
def from_hex_string(cls, hex_string):
if len(hex_string) == 7:
hex_string = hex_string[1:]
if len(hex_string) == 6:
red = int(hex_string[0:2], 16)
green = int(hex_string[2:4], 16)
blue = int(hex_string[4:], 16)
return cls(red, green, blue)
else:
raise Exception("not a valid hexadecimal color")
def __str__(self):
return 'rgb({0}, {1}, {2})'.format(self.red, self.green, self.blue)
def draw_line(svg_file, x1, y1, x2, y2, thickness=10, color=RGB(0, 0, 0)):
svg_line = '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke: {4}; stroke-width: {5}"/> \n'.format(x1, y1,
x2, y2,
color,
thickness)
svg_file.write(svg_line)
def draw_line_polar(svg_file, x_origin, y_origin, length, angle, thickness=10, color=RGB(0, 0, 0)):
svg_line = '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke: {4}; stroke-width: {5}" transform="rotate({6},{0},{1})"/> \n'.format(
x_origin, y_origin, x_origin + length, y_origin, color, thickness, angle)
svg_file.write(svg_line)
def draw_bezier(svg_file, x1, y1, x2, y2, controlX, controlY, color=RGB(0, 0, 0), thickness=1):
svg_bezier = '<path d = "M{0},{1} Q{4},{5} {2},{3}" fill="none" style="stroke: {6}; stroke-width: {7}"/> \n'.format(
x1, y1, x2, y2, controlX, controlY, color, thickness)
svg_file.write(svg_bezier)
def draw_text(svg_file, words, x, y, size, angle=0, color=RGB(0, 0, 0)):
svg_text = '<text x="{0}" y="{1}" style="fill:{2}" font-size="{3}" font-family="sans-serif" transform="scale(1,-1) rotate({5},{0},{1})" text-anchor="middle">{4}</text>\n'.format(
x, -y, color, size, words, angle)
svg_file.write(svg_text)
def draw_text_left(svg_file, words, x, y, size, angle=0, color=RGB(0, 0, 0)):
svg_text = '<text x="{0}" y="{1}" style="fill:{2}" font-size="{3}" font-family="sans-serif" transform="scale(1,-1) rotate({5},{0},{1})" text-anchor="left">{4}</text>\n'.format(
x, -y, color, size, words, angle)
svg_file.write(svg_text)
def draw_multiline_text(svg_file, label, x, y, size, color=RGB(0, 0, 0)):
words_list = label.split('\n')
svg_file.write(
'<text y="{0}" style="fill:{1}" font-size="{2}" font-family="sans-serif" transform="scale(1,-1)" text-anchor="middle">'.format(
-y, color, size))
for i in range(len(words_list)):
if i == 0:
svg_file.write('<tspan x="{0}">{1}</tspan>'.format(x, words_list[i]))
else:
svg_file.write('<tspan x="{0}" dy="{1}">{2}</tspan>'.format(x, size, words_list[i]))
svg_file.write('</text>')
def draw_rectangle(svg_file, x, y, x_dim, y_dim, fill_color):
    svg_rect = '<rect x="{0}" y="{1}" width="{2}" height="{3}" style="fill:{4}; stroke-width:0.01; stroke:{4}"/>\n'.format(
        x, y, x_dim, y_dim, fill_color)
svg_file.write(svg_rect)
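# --- illustration (added; not part of the original module) ---
# A minimal sketch of how the helpers above compose into an SVG document.
# The file name, canvas size, and coordinates are made up for demonstration,
# and the wrapper <g> supplies the cartesian transform assumed at the top of
# this file.
if __name__ == "__main__":
    with open("demo.svg", "w") as svg_file:
        svg_file.write('<svg xmlns="http://www.w3.org/2000/svg" width="200" height="200">\n')
        svg_file.write('<g transform="translate(0,200) scale(1,-1)">\n')
        draw_line(svg_file, 10, 10, 190, 190, thickness=2, color=RGB(200, 30, 30))
        draw_rectangle(svg_file, 20, 120, 60, 40, RGB.from_hex_string('#008080'))
        draw_text(svg_file, "demo", 100, 100, 12)
        svg_file.write('</g>\n</svg>\n')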
|
/sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/conf/drawing.py
| 0.67822 | 0.472683 |
drawing.py
|
pypi
|
u"""
Created by Ran Zhou at 2019/1/17 3:27 PM
This dataset was curated from Singh et al. (10.1038/s41467-018-04112-z).
"""
u"""
# domain information which was downloaded from https://ebi-uniprot.github.io/ProtVista/userGuide.html
category Type Label Description
DOMAIN_AND_SITES domain Domain Position and type of each modular protein domain
DOMAIN_AND_SITES repeat Repeat Positions of repeated sequence motifs or repeated domains
DOMAIN_AND_SITES ca_bind Calcium binding Position(s) of calcium binding region(s) within the protein
DOMAIN_AND_SITES zn_fing Zinc finger Position(s) and type(s) of zinc fingers within the protein
DOMAIN_AND_SITES dna_bind DNA binding Position and type of a DNA-binding domain
DOMAIN_AND_SITES np_bind Nucleotide binding Nucleotide phosphate binding region
DOMAIN_AND_SITES region Region Region of interest in the sequence
DOMAIN_AND_SITES coiled Coiled coil Positions of regions of coiled coil within the protein
DOMAIN_AND_SITES motif Motif Short (up to 20 amino acids) sequence motif of biological interest
DOMAIN_AND_SITES act_site Active site Amino acid(s) directly involved in the activity of an enzyme
DOMAIN_AND_SITES metal Metal binding Binding site for a metal ion
DOMAIN_AND_SITES binding Binding site Binding site for any chemical group (co-enzyme, prosthetic group, etc.)
DOMAIN_AND_SITES site Site Any interesting single amino acid site on the sequence
MOLECULE_PROCESSING init_met Initiator methionine Cleavage of the initiator methionine
MOLECULE_PROCESSING signal Signal Sequence targeting proteins to the secretory pathway or periplasmic space
MOLECULE_PROCESSING transit Transit peptide Extent of a transit peptide for organelle targeting
MOLECULE_PROCESSING propep Propeptide Part of a protein that is cleaved during maturation or activation
MOLECULE_PROCESSING chain Chain Extent of a polypeptide chain in the mature protein
MOLECULE_PROCESSING peptide Peptide Extent of an active peptide in the mature protein
PTM mod_res Modified residue Modified residues excluding lipids, glycans and protein cross-links
PTM lipid Lipidation Covalently attached lipid group(s)
PTM carbohyd Glycosylation Covalently attached glycan group(s)
PTM disulfid Disulfide bond Cysteine residues participating in disulfide bonds
PTM crosslnk Cross-link Residues participating in covalent linkage(s) between proteins
STRUCTURAL helix Helix Helical regions within the experimentally determined protein structure
STRUCTURAL turn Turn Turns within the experimentally determined protein structure
STRUCTURAL strand Beta strand Beta strand regions within the experimentally determined protein structure
TOPOLOGY topo_dom Topological domain Location of non-membrane regions of membrane-spanning proteins
TOPOLOGY transmem Transmembrane Extent of a membrane-spanning region
TOPOLOGY intramem Intramembrane Extent of a region located in a membrane without crossing it
"""
__VALID_DOMAIN_CATEGORY__ = {
'DOMAIN_AND_SITES',
'MOLECULE_PROCESSING',
'PTM',
'STRUCTURAL',
'TOPOLOGY'
}
__DOMAINFILTER__ = {"active site", "domain", "signal peptide", "transmembrane region", "repeat", "zinc finger region",
"compositionally biased region", "DNA-binding region", "region of interest",
"lipid moiety-binding region", "short sequence motif", "calcium-binding region",
"nucleotide phosphate-binding region", "metal ion-binding site", "topological domain"}
__DNABIND__ = {'C2H2-type', 'PHD-type', 'C3H1-type', 'KRAB', 'Bromo', 'Chromo', 'DNA-binding', 'C4-type', 'CHCR',
'A.T hook', 'bZIP', 'bHLH', 'CCHC-type', 'CHCH', 'Bromodomain-like', 'CH1', 'C6-type', 'A.T hook-like',
'C4H2 - type', 'CHHC-type'}
__ACTIVE__ = {'active site', 'catalytic sites'}
__TRANSREGION__ = {'transmembrane region', 'ABC transmembrane type-1', 'ABC transporter', 'ABC transmembrane type-2'}
__PPI__ = {"WD", "ANK", "TPR", "LRR", "HEAT", "Sushi", "EF-hand", "ARM", "PDZ", "PH", "SH3", "RING-type",
"LIM zinc-binding", "WW", "SH2", "BTB", "FERM", "CH", "Rod", "Coil 1A", "MH2", "WD40-like repeat",
"t-SNARE coiled-coil homology", "Coil 1B", "Cbl-PTB", "Coil", "CARD", "SH2-like", "DED", "IRS-type PTB",
"SP-RING-type", "EF-hand-like", "RING-CHtype", "v-SNARE coiled-coil homology", "Arm domain",
"LIM protein-binding", "GYF", "PDZ domain-binding", "PDZD11-binding"}
__RNABIND__ = {"RRM", "SAM", "KH", "DRBM", "RBD", "Piwi", "PAZ", "S1 motif", "Pumilio", "THUMP"}
__ANNOTATION__ = {
"PPI": __PPI__,
"TMD": __TRANSREGION__,
"activesite": __ACTIVE__,
"dnabinding": __DNABIND__,
"rnabinding": __RNABIND__
}
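# --- illustration (added; not part of the original module) ---
# A hedged sketch of how the tables above can classify a UniProt feature
# label into one of the annotation groups; the helper name classify_domain
# and the sample label are made up for demonstration.
def classify_domain(label):
    for group, members in __ANNOTATION__.items():
        if label in members:
            return group
    return None
# classify_domain("RRM") -> "rnabinding"; unknown labels return None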
|
/sashimi.py-0.1.5.tar.gz/sashimi.py-0.1.5/sashimi/conf/DomainSetting.py
| 0.52756 | 0.314011 |
DomainSetting.py
|
pypi
|
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sasktranif
else:
import _sasktranif
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
NXBASE_NXVECTOR_H = _sasktranif.NXBASE_NXVECTOR_H
class nxVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
_sasktranif.nxVector_swiginit(self, _sasktranif.new_nxVector(*args))
def FromSequence(self, fixedarray: "double const [3]") -> "void":
return _sasktranif.nxVector_FromSequence(self, fixedarray)
def AsSequence(self) -> "nxVector":
return _sasktranif.nxVector_AsSequence(self)
def SetCoords(self, *args) -> "void":
return _sasktranif.nxVector_SetCoords(self, *args)
def FromLatLong(self, latitude: "double", longitude: "double", magnitude: "double"=1.0) -> "void":
return _sasktranif.nxVector_FromLatLong(self, latitude, longitude, magnitude)
def IsZero(self) -> "bool":
return _sasktranif.nxVector_IsZero(self)
def IsValid(self) -> "bool":
return _sasktranif.nxVector_IsValid(self)
def SetInvalid(self) -> "void":
return _sasktranif.nxVector_SetInvalid(self)
def X(self) -> "double":
return _sasktranif.nxVector_X(self)
def Y(self) -> "double":
return _sasktranif.nxVector_Y(self)
def Z(self) -> "double":
return _sasktranif.nxVector_Z(self)
def Data(self) -> "NXVECTOR &":
return _sasktranif.nxVector_Data(self)
def AngleTo(self, V2: "nxVector") -> "double":
return _sasktranif.nxVector_AngleTo(self, V2)
def UnitVector(self) -> "nxVector":
return _sasktranif.nxVector_UnitVector(self)
def ComponentPerpendicularTo(self, Z: "nxVector") -> "nxVector":
return _sasktranif.nxVector_ComponentPerpendicularTo(self, Z)
def ComponentParallelTo(self, Z: "nxVector") -> "nxVector":
return _sasktranif.nxVector_ComponentParallelTo(self, Z)
def IndexOfMaxComponent(self) -> "int":
return _sasktranif.nxVector_IndexOfMaxComponent(self)
def Longitude(self) -> "double":
return _sasktranif.nxVector_Longitude(self)
def Latitude(self) -> "double":
return _sasktranif.nxVector_Latitude(self)
def EquatorialToGeographic(self, Tnow: "nxTimeStamp const &") -> "nxVector":
return _sasktranif.nxVector_EquatorialToGeographic(self, Tnow)
def GeographicToEquatorial(self, Tnow: "nxTimeStamp const &") -> "nxVector":
return _sasktranif.nxVector_GeographicToEquatorial(self, Tnow)
def GeographicToGeomagneticDipole(self) -> "nxVector":
return _sasktranif.nxVector_GeographicToGeomagneticDipole(self)
def TransformToNewPole(self, *args) -> "void":
return _sasktranif.nxVector_TransformToNewPole(self, *args)
def RotateAboutXaxis(self, theta: "double") -> "void":
return _sasktranif.nxVector_RotateAboutXaxis(self, theta)
def RotateAboutZaxis(self, theta: "double") -> "void":
return _sasktranif.nxVector_RotateAboutZaxis(self, theta)
def Dot(self, v2: "nxVector") -> "double":
return _sasktranif.nxVector_Dot(self, v2)
def Magnitude(self) -> "double":
return _sasktranif.nxVector_Magnitude(self)
def Cross(self, v2: "nxVector") -> "nxVector":
return _sasktranif.nxVector_Cross(self, v2)
def __iadd__(self, v2: "nxVector") -> "nxVector &":
return _sasktranif.nxVector___iadd__(self, v2)
def __isub__(self, v2: "nxVector") -> "nxVector &":
return _sasktranif.nxVector___isub__(self, v2)
def __imul__(self, *args) -> "nxVector &":
return _sasktranif.nxVector___imul__(self, *args)
def __itruediv__(self, *args):
return _sasktranif.nxVector___itruediv__(self, *args)
__idiv__ = __itruediv__
def __neg__(self) -> "nxVector":
return _sasktranif.nxVector___neg__(self)
def __add__(self, *args) -> "nxVector":
return _sasktranif.nxVector___add__(self, *args)
def __sub__(self, *args) -> "nxVector":
return _sasktranif.nxVector___sub__(self, *args)
def __truediv__(self, *args):
return _sasktranif.nxVector___truediv__(self, *args)
__div__ = __truediv__
def __xor__(self, v2: "nxVector") -> "nxVector":
return _sasktranif.nxVector___xor__(self, v2)
def __and__(self, v2: "nxVector") -> "double":
return _sasktranif.nxVector___and__(self, v2)
__swig_destroy__ = _sasktranif.delete_nxVector
# Register nxVector in _sasktranif:
_sasktranif.nxVector_swigregister(nxVector)
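def _nxvector_example():
    # Hedged usage sketch (added; not part of the generated wrapper): build a
    # unit vector from latitude/longitude and query it. Requires the compiled
    # _sasktranif extension; the coordinates are made up for demonstration.
    v = nxVector()
    v.FromLatLong(52.1, -106.6, 1.0)
    unit = v.UnitVector()
    return v.Magnitude(), v.AngleTo(unit)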
class GEODETIC_INSTANT(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
latitude = property(_sasktranif.GEODETIC_INSTANT_latitude_get, _sasktranif.GEODETIC_INSTANT_latitude_set)
longitude = property(_sasktranif.GEODETIC_INSTANT_longitude_get, _sasktranif.GEODETIC_INSTANT_longitude_set)
heightm = property(_sasktranif.GEODETIC_INSTANT_heightm_get, _sasktranif.GEODETIC_INSTANT_heightm_set)
mjd = property(_sasktranif.GEODETIC_INSTANT_mjd_get, _sasktranif.GEODETIC_INSTANT_mjd_set)
def __init__(self, *args):
_sasktranif.GEODETIC_INSTANT_swiginit(self, _sasktranif.new_GEODETIC_INSTANT(*args))
def __eq__(self, other: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.GEODETIC_INSTANT___eq__(self, other)
def FromSequence(self, fixedarray: "double const [4]") -> "void":
return _sasktranif.GEODETIC_INSTANT_FromSequence(self, fixedarray)
def AsSequence(self) -> "GEODETIC_INSTANT":
return _sasktranif.GEODETIC_INSTANT_AsSequence(self)
__swig_destroy__ = _sasktranif.delete_GEODETIC_INSTANT
# Register GEODETIC_INSTANT in _sasktranif:
_sasktranif.GEODETIC_INSTANT_swigregister(GEODETIC_INSTANT)
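def _geodetic_instant_example():
    # Hedged sketch (added; not part of the generated wrapper): a
    # GEODETIC_INSTANT bundles a location with a modified Julian date, as
    # consumed by UpdateCache/GetParameter below. All values are illustrative.
    pt = GEODETIC_INSTANT()
    pt.latitude = 52.1
    pt.longitude = -106.6
    pt.heightm = 25000.0
    pt.mjd = 54372.5
    return pt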
def AddGlobalClimatologyHandle(name: "char const *", handle: "CLIMATOLOGY_HANDLE const &") -> "bool":
return _sasktranif.AddGlobalClimatologyHandle(name, handle)
def FindGlobalClimatologyHandle(name: "char const *", printerror: "bool"=True) -> "CLIMATOLOGY_HANDLE *":
return _sasktranif.FindGlobalClimatologyHandle(name, printerror)
def FindGlobalClimatologyNameOfHandle(handle: "CLIMATOLOGY_HANDLE &") -> "char const *":
return _sasktranif.FindGlobalClimatologyNameOfHandle(handle)
def HasKey_InGlobalClimatologyHandle(name: "char const *") -> "bool":
return _sasktranif.HasKey_InGlobalClimatologyHandle(name)
def InternalGlobalClimatologyHandleTable() -> "std::map< nxString,CLIMATOLOGY_HANDLE > *":
return _sasktranif.InternalGlobalClimatologyHandleTable()
def AddGeneratedGlobalClimatologyHandleIfNotExists(name: "char const *") -> "bool":
return _sasktranif.AddGeneratedGlobalClimatologyHandleIfNotExists(name)
def SetParentHandleTable(parenttable: "std::map< nxString,CLIMATOLOGY_HANDLE > *") -> "bool":
return _sasktranif.SetParentHandleTable(parenttable)
class ISKBasisDirection(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def Assign(self, prop: "nxVector", theta: "nxVector", phi: "nxVector") -> "void":
return _sasktranif.ISKBasisDirection_Assign(self, prop, theta, phi)
def Propagation(self) -> "nxVector const &":
return _sasktranif.ISKBasisDirection_Propagation(self)
def Theta(self) -> "nxVector const &":
return _sasktranif.ISKBasisDirection_Theta(self)
def Phi(self) -> "nxVector const &":
return _sasktranif.ISKBasisDirection_Phi(self)
def __init__(self):
_sasktranif.ISKBasisDirection_swiginit(self, _sasktranif.new_ISKBasisDirection())
__swig_destroy__ = _sasktranif.delete_ISKBasisDirection
# Register ISKBasisDirection in _sasktranif:
_sasktranif.ISKBasisDirection_swigregister(ISKBasisDirection)
cvar = _sasktranif.cvar
SKCLIMATOLOGY_AOA_DAYS = cvar.SKCLIMATOLOGY_AOA_DAYS
SKCLIMATOLOGY_AEROSOLDUST_CM3 = cvar.SKCLIMATOLOGY_AEROSOLDUST_CM3
SKCLIMATOLOGY_AEROSOLH2SO4_CM3 = cvar.SKCLIMATOLOGY_AEROSOLH2SO4_CM3
SKCLIMATOLOGY_AEROSOLICE_CM3 = cvar.SKCLIMATOLOGY_AEROSOLICE_CM3
SKCLIMATOLOGY_AEROSOLSURFACEAREA_UM2PerCM3 = cvar.SKCLIMATOLOGY_AEROSOLSURFACEAREA_UM2PerCM3
SKCLIMATOLOGY_AEROSOLWATER_CM3 = cvar.SKCLIMATOLOGY_AEROSOLWATER_CM3
SKCLIMATOLOGY_AEROSOL_CM3 = cvar.SKCLIMATOLOGY_AEROSOL_CM3
SKCLIMATOLOGY_AEROSOL_EXTINCTIONPERKM = cvar.SKCLIMATOLOGY_AEROSOL_EXTINCTIONPERKM
SKCLIMATOLOGY_AIRNUMBERDENSITY_CM3 = cvar.SKCLIMATOLOGY_AIRNUMBERDENSITY_CM3
SKCLIMATOLOGY_ALBEDO = cvar.SKCLIMATOLOGY_ALBEDO
SKCLIMATOLOGY_Ar_CM3 = cvar.SKCLIMATOLOGY_Ar_CM3
SKCLIMATOLOGY_BCPI = cvar.SKCLIMATOLOGY_BCPI
SKCLIMATOLOGY_BCPO = cvar.SKCLIMATOLOGY_BCPO
SKCLIMATOLOGY_BRCL_CM3 = cvar.SKCLIMATOLOGY_BRCL_CM3
SKCLIMATOLOGY_BRNO3_CM3 = cvar.SKCLIMATOLOGY_BRNO3_CM3
SKCLIMATOLOGY_BRO_CM3 = cvar.SKCLIMATOLOGY_BRO_CM3
SKCLIMATOLOGY_BRO_VMR = cvar.SKCLIMATOLOGY_BRO_VMR
SKCLIMATOLOGY_BRX_CM3 = cvar.SKCLIMATOLOGY_BRX_CM3
SKCLIMATOLOGY_BRX_VMR = cvar.SKCLIMATOLOGY_BRX_VMR
SKCLIMATOLOGY_BRY_CM3 = cvar.SKCLIMATOLOGY_BRY_CM3
SKCLIMATOLOGY_BRY_VMR = cvar.SKCLIMATOLOGY_BRY_VMR
SKCLIMATOLOGY_BR_CM3 = cvar.SKCLIMATOLOGY_BR_CM3
SKCLIMATOLOGY_C2H2_CM3 = cvar.SKCLIMATOLOGY_C2H2_CM3
SKCLIMATOLOGY_C2H4_CM3 = cvar.SKCLIMATOLOGY_C2H4_CM3
SKCLIMATOLOGY_C2H6_CM3 = cvar.SKCLIMATOLOGY_C2H6_CM3
SKCLIMATOLOGY_C3H6O_CM3 = cvar.SKCLIMATOLOGY_C3H6O_CM3
SKCLIMATOLOGY_C3H6O_VMR = cvar.SKCLIMATOLOGY_C3H6O_VMR
SKCLIMATOLOGY_C5H8_CM3 = cvar.SKCLIMATOLOGY_C5H8_CM3
SKCLIMATOLOGY_C5H8_VMR = cvar.SKCLIMATOLOGY_C5H8_VMR
SKCLIMATOLOGY_CCL4_CM3 = cvar.SKCLIMATOLOGY_CCL4_CM3
SKCLIMATOLOGY_CCL4_VMR = cvar.SKCLIMATOLOGY_CCL4_VMR
SKCLIMATOLOGY_CF2CL2_CM3 = cvar.SKCLIMATOLOGY_CF2CL2_CM3
SKCLIMATOLOGY_CF2CL2_VMR = cvar.SKCLIMATOLOGY_CF2CL2_VMR
SKCLIMATOLOGY_CF4_CM3 = cvar.SKCLIMATOLOGY_CF4_CM3
SKCLIMATOLOGY_CFCL3_CM3 = cvar.SKCLIMATOLOGY_CFCL3_CM3
SKCLIMATOLOGY_CFCL3_VMR = cvar.SKCLIMATOLOGY_CFCL3_VMR
SKCLIMATOLOGY_CH2O_CM3 = cvar.SKCLIMATOLOGY_CH2O_CM3
SKCLIMATOLOGY_CH2O_VMR = cvar.SKCLIMATOLOGY_CH2O_VMR
SKCLIMATOLOGY_CH3BR_CM3 = cvar.SKCLIMATOLOGY_CH3BR_CM3
SKCLIMATOLOGY_CH3BR_VMR = cvar.SKCLIMATOLOGY_CH3BR_VMR
SKCLIMATOLOGY_CH3CL_CM3 = cvar.SKCLIMATOLOGY_CH3CL_CM3
SKCLIMATOLOGY_CH3CL_VMR = cvar.SKCLIMATOLOGY_CH3CL_VMR
SKCLIMATOLOGY_CH3CN_CM3 = cvar.SKCLIMATOLOGY_CH3CN_CM3
SKCLIMATOLOGY_CH3I_CM3 = cvar.SKCLIMATOLOGY_CH3I_CM3
SKCLIMATOLOGY_CH3I_VMR = cvar.SKCLIMATOLOGY_CH3I_VMR
SKCLIMATOLOGY_CH3OH_CM3 = cvar.SKCLIMATOLOGY_CH3OH_CM3
SKCLIMATOLOGY_CH4_CM3 = cvar.SKCLIMATOLOGY_CH4_CM3
SKCLIMATOLOGY_CH4_VMR = cvar.SKCLIMATOLOGY_CH4_VMR
SKCLIMATOLOGY_CL2O2_CM3 = cvar.SKCLIMATOLOGY_CL2O2_CM3
SKCLIMATOLOGY_CL2_CM3 = cvar.SKCLIMATOLOGY_CL2_CM3
SKCLIMATOLOGY_CLNO3_CM3 = cvar.SKCLIMATOLOGY_CLNO3_CM3
SKCLIMATOLOGY_CLONO2_CM3 = cvar.SKCLIMATOLOGY_CLONO2_CM3
SKCLIMATOLOGY_CLOUD_FRACTION = cvar.SKCLIMATOLOGY_CLOUD_FRACTION
SKCLIMATOLOGY_CLO_CM3 = cvar.SKCLIMATOLOGY_CLO_CM3
SKCLIMATOLOGY_CLY_CM3 = cvar.SKCLIMATOLOGY_CLY_CM3
SKCLIMATOLOGY_CLY_VMR = cvar.SKCLIMATOLOGY_CLY_VMR
SKCLIMATOLOGY_CL_CM3 = cvar.SKCLIMATOLOGY_CL_CM3
SKCLIMATOLOGY_CO2_CM3 = cvar.SKCLIMATOLOGY_CO2_CM3
SKCLIMATOLOGY_CO2_VMR = cvar.SKCLIMATOLOGY_CO2_VMR
SKCLIMATOLOGY_COF2_CM3 = cvar.SKCLIMATOLOGY_COF2_CM3
SKCLIMATOLOGY_CO_CM3 = cvar.SKCLIMATOLOGY_CO_CM3
SKCLIMATOLOGY_CO_VMR = cvar.SKCLIMATOLOGY_CO_VMR
SKCLIMATOLOGY_DUST_0p7mu = cvar.SKCLIMATOLOGY_DUST_0p7mu
SKCLIMATOLOGY_DUST_1p4mu = cvar.SKCLIMATOLOGY_DUST_1p4mu
SKCLIMATOLOGY_DUST_2p4mu = cvar.SKCLIMATOLOGY_DUST_2p4mu
SKCLIMATOLOGY_DUST_4p5mu = cvar.SKCLIMATOLOGY_DUST_4p5mu
SKCLIMATOLOGY_EFFECTIVESIZE_MICRONS = cvar.SKCLIMATOLOGY_EFFECTIVESIZE_MICRONS
SKCLIMATOLOGY_EPV = cvar.SKCLIMATOLOGY_EPV
SKCLIMATOLOGY_GEOMETRIC_HEIGHT = cvar.SKCLIMATOLOGY_GEOMETRIC_HEIGHT
SKCLIMATOLOGY_GEOPOTENTIAL_HEIGHT = cvar.SKCLIMATOLOGY_GEOPOTENTIAL_HEIGHT
SKCLIMATOLOGY_H2CO_CM3 = cvar.SKCLIMATOLOGY_H2CO_CM3
SKCLIMATOLOGY_H2O2_CM3 = cvar.SKCLIMATOLOGY_H2O2_CM3
SKCLIMATOLOGY_H2O_CM3 = cvar.SKCLIMATOLOGY_H2O_CM3
SKCLIMATOLOGY_H2O_VMR = cvar.SKCLIMATOLOGY_H2O_VMR
SKCLIMATOLOGY_H2S_CM3 = cvar.SKCLIMATOLOGY_H2S_CM3
SKCLIMATOLOGY_H2_CM3 = cvar.SKCLIMATOLOGY_H2_CM3
SKCLIMATOLOGY_H2_VMR = cvar.SKCLIMATOLOGY_H2_VMR
SKCLIMATOLOGY_HBR_CM3 = cvar.SKCLIMATOLOGY_HBR_CM3
SKCLIMATOLOGY_HCL_CM3 = cvar.SKCLIMATOLOGY_HCL_CM3
SKCLIMATOLOGY_HCN_CM3 = cvar.SKCLIMATOLOGY_HCN_CM3
SKCLIMATOLOGY_HCOOH_CM3 = cvar.SKCLIMATOLOGY_HCOOH_CM3
SKCLIMATOLOGY_HF_CM3 = cvar.SKCLIMATOLOGY_HF_CM3
SKCLIMATOLOGY_HI_CM3 = cvar.SKCLIMATOLOGY_HI_CM3
SKCLIMATOLOGY_HNO2_CM3 = cvar.SKCLIMATOLOGY_HNO2_CM3
SKCLIMATOLOGY_HNO2_VMR = cvar.SKCLIMATOLOGY_HNO2_VMR
SKCLIMATOLOGY_HNO3_CM3 = cvar.SKCLIMATOLOGY_HNO3_CM3
SKCLIMATOLOGY_HNO3_VMR = cvar.SKCLIMATOLOGY_HNO3_VMR
SKCLIMATOLOGY_HNO4_CM3 = cvar.SKCLIMATOLOGY_HNO4_CM3
SKCLIMATOLOGY_HO2_CM3 = cvar.SKCLIMATOLOGY_HO2_CM3
SKCLIMATOLOGY_HOBR_CM3 = cvar.SKCLIMATOLOGY_HOBR_CM3
SKCLIMATOLOGY_HOCL_CM3 = cvar.SKCLIMATOLOGY_HOCL_CM3
SKCLIMATOLOGY_H_CM3 = cvar.SKCLIMATOLOGY_H_CM3
SKCLIMATOLOGY_He_CM3 = cvar.SKCLIMATOLOGY_He_CM3
SKCLIMATOLOGY_ICE_CM3 = cvar.SKCLIMATOLOGY_ICE_CM3
SKCLIMATOLOGY_JH2O = cvar.SKCLIMATOLOGY_JH2O
SKCLIMATOLOGY_LOGNORMAL_MODERADIUS_MICRONS = cvar.SKCLIMATOLOGY_LOGNORMAL_MODERADIUS_MICRONS
SKCLIMATOLOGY_LOGNORMAL_MODEWIDTH = cvar.SKCLIMATOLOGY_LOGNORMAL_MODEWIDTH
SKCLIMATOLOGY_MECL_CM3 = cvar.SKCLIMATOLOGY_MECL_CM3
SKCLIMATOLOGY_MECL_VMR = cvar.SKCLIMATOLOGY_MECL_VMR
SKCLIMATOLOGY_N2O5_CM3 = cvar.SKCLIMATOLOGY_N2O5_CM3
SKCLIMATOLOGY_N2O_CM3 = cvar.SKCLIMATOLOGY_N2O_CM3
SKCLIMATOLOGY_N2O_VMR = cvar.SKCLIMATOLOGY_N2O_VMR
SKCLIMATOLOGY_N2_CM3 = cvar.SKCLIMATOLOGY_N2_CM3
SKCLIMATOLOGY_N2_VMR = cvar.SKCLIMATOLOGY_N2_VMR
SKCLIMATOLOGY_NH3_CM3 = cvar.SKCLIMATOLOGY_NH3_CM3
SKCLIMATOLOGY_NH3_VMR = cvar.SKCLIMATOLOGY_NH3_VMR
SKCLIMATOLOGY_NITS = cvar.SKCLIMATOLOGY_NITS
SKCLIMATOLOGY_NO2_CM3 = cvar.SKCLIMATOLOGY_NO2_CM3
SKCLIMATOLOGY_NO2_VMR = cvar.SKCLIMATOLOGY_NO2_VMR
SKCLIMATOLOGY_NO3_CM3 = cvar.SKCLIMATOLOGY_NO3_CM3
SKCLIMATOLOGY_NOPLUS_CM3 = cvar.SKCLIMATOLOGY_NOPLUS_CM3
SKCLIMATOLOGY_NOY_CM3 = cvar.SKCLIMATOLOGY_NOY_CM3
SKCLIMATOLOGY_NOY_VMR = cvar.SKCLIMATOLOGY_NOY_VMR
SKCLIMATOLOGY_NO_CM3 = cvar.SKCLIMATOLOGY_NO_CM3
SKCLIMATOLOGY_NO_VMR = cvar.SKCLIMATOLOGY_NO_VMR
SKCLIMATOLOGY_N_CM3 = cvar.SKCLIMATOLOGY_N_CM3
SKCLIMATOLOGY_O2_CM3 = cvar.SKCLIMATOLOGY_O2_CM3
SKCLIMATOLOGY_O2_O2_CM6 = cvar.SKCLIMATOLOGY_O2_O2_CM6
SKCLIMATOLOGY_O2_VMR = cvar.SKCLIMATOLOGY_O2_VMR
SKCLIMATOLOGY_O3_CM3 = cvar.SKCLIMATOLOGY_O3_CM3
SKCLIMATOLOGY_O3_VMR = cvar.SKCLIMATOLOGY_O3_VMR
SKCLIMATOLOGY_OCLO_CM3 = cvar.SKCLIMATOLOGY_OCLO_CM3
SKCLIMATOLOGY_OCPI = cvar.SKCLIMATOLOGY_OCPI
SKCLIMATOLOGY_OCPO = cvar.SKCLIMATOLOGY_OCPO
SKCLIMATOLOGY_OCS_CM3 = cvar.SKCLIMATOLOGY_OCS_CM3
SKCLIMATOLOGY_OH_CM3 = cvar.SKCLIMATOLOGY_OH_CM3
SKCLIMATOLOGY_O_CM3 = cvar.SKCLIMATOLOGY_O_CM3
SKCLIMATOLOGY_PAN_CM3 = cvar.SKCLIMATOLOGY_PAN_CM3
SKCLIMATOLOGY_PAN_VMR = cvar.SKCLIMATOLOGY_PAN_VMR
SKCLIMATOLOGY_PH3_CM3 = cvar.SKCLIMATOLOGY_PH3_CM3
SKCLIMATOLOGY_POTENTIAL_TEMPERATURE_K = cvar.SKCLIMATOLOGY_POTENTIAL_TEMPERATURE_K
SKCLIMATOLOGY_PRESSURE_PA = cvar.SKCLIMATOLOGY_PRESSURE_PA
SKCLIMATOLOGY_QI_MMR = cvar.SKCLIMATOLOGY_QI_MMR
SKCLIMATOLOGY_QL_MMR = cvar.SKCLIMATOLOGY_QL_MMR
SKCLIMATOLOGY_QV = cvar.SKCLIMATOLOGY_QV
SKCLIMATOLOGY_RH = cvar.SKCLIMATOLOGY_RH
SKCLIMATOLOGY_ROOH_CM3 = cvar.SKCLIMATOLOGY_ROOH_CM3
SKCLIMATOLOGY_ROO_CM3 = cvar.SKCLIMATOLOGY_ROO_CM3
SKCLIMATOLOGY_SALA = cvar.SKCLIMATOLOGY_SALA
SKCLIMATOLOGY_SALC = cvar.SKCLIMATOLOGY_SALC
SKCLIMATOLOGY_SF6_CM3 = cvar.SKCLIMATOLOGY_SF6_CM3
SKCLIMATOLOGY_SO2_CM3 = cvar.SKCLIMATOLOGY_SO2_CM3
SKCLIMATOLOGY_SO2_VMR = cvar.SKCLIMATOLOGY_SO2_VMR
SKCLIMATOLOGY_SO4_CM3 = cvar.SKCLIMATOLOGY_SO4_CM3
SKCLIMATOLOGY_SO4_VMR = cvar.SKCLIMATOLOGY_SO4_VMR
SKCLIMATOLOGY_SURFACE_GEOMETRIC_HEIGHT = cvar.SKCLIMATOLOGY_SURFACE_GEOMETRIC_HEIGHT
SKCLIMATOLOGY_SURFACE_GEOPOTENTIAL_HEIGHT = cvar.SKCLIMATOLOGY_SURFACE_GEOPOTENTIAL_HEIGHT
SKCLIMATOLOGY_SURFACE_PRESSURE_PA = cvar.SKCLIMATOLOGY_SURFACE_PRESSURE_PA
SKCLIMATOLOGY_TEMPERATURE_K = cvar.SKCLIMATOLOGY_TEMPERATURE_K
SKCLIMATOLOGY_UNDEFINED = cvar.SKCLIMATOLOGY_UNDEFINED
SKCLIMATOLOGY_XXX_CM3 = cvar.SKCLIMATOLOGY_XXX_CM3
SKCLIMATOLOGY_XXX_VMR = cvar.SKCLIMATOLOGY_XXX_VMR
SKEMISSION_PHOTOCHEMICAL_0 = cvar.SKEMISSION_PHOTOCHEMICAL_0
SKEMISSION_PHOTOCHEMICAL_1 = cvar.SKEMISSION_PHOTOCHEMICAL_1
SKEMISSION_PHOTOCHEMICAL_2 = cvar.SKEMISSION_PHOTOCHEMICAL_2
SKEMISSION_PHOTOCHEMICAL_3 = cvar.SKEMISSION_PHOTOCHEMICAL_3
SKEMISSION_PHOTOCHEMICAL_4 = cvar.SKEMISSION_PHOTOCHEMICAL_4
SKEMISSION_PHOTOCHEMICAL_5 = cvar.SKEMISSION_PHOTOCHEMICAL_5
SKEMISSION_PHOTOCHEMICAL_6 = cvar.SKEMISSION_PHOTOCHEMICAL_6
SKEMISSION_PHOTOCHEMICAL_7 = cvar.SKEMISSION_PHOTOCHEMICAL_7
SKEMISSION_PHOTOCHEMICAL_8 = cvar.SKEMISSION_PHOTOCHEMICAL_8
SKEMISSION_PHOTOCHEMICAL_9 = cvar.SKEMISSION_PHOTOCHEMICAL_9
SKEMISSION_PHOTOCHEMICAL_O2 = cvar.SKEMISSION_PHOTOCHEMICAL_O2
SKEMISSION_PHOTOCHEMICAL_OH = cvar.SKEMISSION_PHOTOCHEMICAL_OH
SKEMISSION_PHOTOCHEMICAL_O3 = cvar.SKEMISSION_PHOTOCHEMICAL_O3
SKEMISSION_THERMAL = cvar.SKEMISSION_THERMAL
SKCLIMATOLOGY_AEROSOL_CM3_MODE2 = cvar.SKCLIMATOLOGY_AEROSOL_CM3_MODE2
SKCLIMATOLOGY_GAMMA_EFFECTIVERADIUS_MICRONS = cvar.SKCLIMATOLOGY_GAMMA_EFFECTIVERADIUS_MICRONS
SKCLIMATOLOGY_GAMMA_EFFECTIVEVARIANCE_PERMICRON = cvar.SKCLIMATOLOGY_GAMMA_EFFECTIVEVARIANCE_PERMICRON
SKCLIMATOLOGY_AEROSOL_MODERADIUS_MICRONS = cvar.SKCLIMATOLOGY_AEROSOL_MODERADIUS_MICRONS
SKCLIMATOLOGY_AEROSOL_MODEWIDTH = cvar.SKCLIMATOLOGY_AEROSOL_MODEWIDTH
SKCLIMATOLOGY_ICE_MODERADIUS_MICRONS = cvar.SKCLIMATOLOGY_ICE_MODERADIUS_MICRONS
SKCLIMATOLOGY_ICE_MODEWIDTH = cvar.SKCLIMATOLOGY_ICE_MODEWIDTH
class IQUV(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
I = property(_sasktranif.IQUV_I_get, _sasktranif.IQUV_I_set)
Q = property(_sasktranif.IQUV_Q_get, _sasktranif.IQUV_Q_set)
U = property(_sasktranif.IQUV_U_get, _sasktranif.IQUV_U_set)
V = property(_sasktranif.IQUV_V_get, _sasktranif.IQUV_V_set)
def __init__(self):
_sasktranif.IQUV_swiginit(self, _sasktranif.new_IQUV())
__swig_destroy__ = _sasktranif.delete_IQUV
# Register IQUV in _sasktranif:
_sasktranif.IQUV_swigregister(IQUV)
class ISKStokesVector(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
_sasktranif.ISKStokesVector_swiginit(self, _sasktranif.new_ISKStokesVector(*args))
def Assign(self, stokes: "IQUV", new_basis: "ISKBasisDirection") -> "void":
return _sasktranif.ISKStokesVector_Assign(self, stokes, new_basis)
def Stokes(self) -> "IQUV const &":
return _sasktranif.ISKStokesVector_Stokes(self)
def Basis(self) -> "ISKBasisDirection const &":
return _sasktranif.ISKStokesVector_Basis(self)
def to_new_basis(self, *args) -> "void":
return _sasktranif.ISKStokesVector_to_new_basis(self, *args)
def propagation_direction(self) -> "nxVector const &":
return _sasktranif.ISKStokesVector_propagation_direction(self)
def theta_direction(self) -> "nxVector const &":
return _sasktranif.ISKStokesVector_theta_direction(self)
def phi_direction(self) -> "nxVector const &":
return _sasktranif.ISKStokesVector_phi_direction(self)
def I(self) -> "double":
return _sasktranif.ISKStokesVector_I(self)
def Q(self) -> "double":
return _sasktranif.ISKStokesVector_Q(self)
def U(self) -> "double":
return _sasktranif.ISKStokesVector_U(self)
def V(self) -> "double":
return _sasktranif.ISKStokesVector_V(self)
__swig_destroy__ = _sasktranif.delete_ISKStokesVector
# Register ISKStokesVector in _sasktranif:
_sasktranif.ISKStokesVector_swigregister(ISKStokesVector)
def SKTRAN_IFSetRegistryDirectory(registrydirname: "char const *") -> "bool":
return _sasktranif.SKTRAN_IFSetRegistryDirectory(registrydirname)
def SKTRAN_IFCreateRegistryEntriesForDLL(dllname: "char const *", paramstr: "char const *") -> "bool":
return _sasktranif.SKTRAN_IFCreateRegistryEntriesForDLL(dllname, paramstr)
class ISKModuleBase(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _sasktranif.delete_ISKModuleBase
def RawObjectUnknown(self) -> "nxUnknown *":
return _sasktranif.ISKModuleBase_RawObjectUnknown(self)
def SetProperty(self, propertyname: "char const *", valueorobject: "void *") -> "bool":
return _sasktranif.ISKModuleBase_SetProperty(self, propertyname, valueorobject)
def GetProperty(self, propertyname: "char const *") -> "bool":
return _sasktranif.ISKModuleBase_GetProperty(self, propertyname)
# Register ISKModuleBase in _sasktranif:
_sasktranif.ISKModuleBase_swigregister(ISKModuleBase)
class ISKClimatology(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, climatologyname: "char const *"):
_sasktranif.ISKClimatology_swiginit(self, _sasktranif.new_ISKClimatology(climatologyname))
__swig_destroy__ = _sasktranif.delete_ISKClimatology
def Stub(self) -> "ISKClimatology_Stub *":
return _sasktranif.ISKClimatology_Stub(self)
def Create_New_ClimatologyName(self, name: "char const *") -> "bool":
return _sasktranif.ISKClimatology_Create_New_ClimatologyName(self, name)
def IsValidObject(self) -> "bool":
return _sasktranif.ISKClimatology_IsValidObject(self)
def UpdateCache(self, location: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.ISKClimatology_UpdateCache(self, location)
def GetParameter(self, climatology_handle_name: "char const *", location: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.ISKClimatology_GetParameter(self, climatology_handle_name, location)
def GetHeightProfile(self, climatology_handle_name: "char const *", location: "GEODETIC_INSTANT", altitude: "double const *") -> "bool":
return _sasktranif.ISKClimatology_GetHeightProfile(self, climatology_handle_name, location, altitude)
def SetPropertyUserDefined(self, climatology_handle_name: "char const *", profilevalues: "double *") -> "bool":
return _sasktranif.ISKClimatology_SetPropertyUserDefined(self, climatology_handle_name, profilevalues)
# Register ISKClimatology in _sasktranif:
_sasktranif.ISKClimatology_swigregister(ISKClimatology)
class ISKOpticalProperty(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
_sasktranif.ISKOpticalProperty_swiginit(self, _sasktranif.new_ISKOpticalProperty(*args))
__swig_destroy__ = _sasktranif.delete_ISKOpticalProperty
def Stub(self) -> "ISKOpticalProperty_Stub *":
return _sasktranif.ISKOpticalProperty_Stub(self)
def IsValidObject(self) -> "bool":
return _sasktranif.ISKOpticalProperty_IsValidObject(self)
def SetAtmosphericState(self, atmosphere: "ISKClimatology") -> "bool":
return _sasktranif.ISKOpticalProperty_SetAtmosphericState(self, atmosphere)
def SetLocation(self, pt: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.ISKOpticalProperty_SetLocation(self, pt)
def InternalClimatology_UpdateCache(self, pt: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.ISKOpticalProperty_InternalClimatology_UpdateCache(self, pt)
def CalculateCrossSections(self, wavenumber: "double const *") -> "bool":
return _sasktranif.ISKOpticalProperty_CalculateCrossSections(self, wavenumber)
def CalculatePhaseMatrix(self, wavenumber: "double", cosscatterangle: "double") -> "bool":
return _sasktranif.ISKOpticalProperty_CalculatePhaseMatrix(self, wavenumber, cosscatterangle)
def AddUserDefined(self, temperature: "double", wavelen_nm: "double *", crosssection: "double *") -> "bool":
return _sasktranif.ISKOpticalProperty_AddUserDefined(self, temperature, wavelen_nm, crosssection)
def AddUserDefinedPressure(self, pressure: "double *", temperature: "double *", wavelen_nm: "double *", crosssection: "double *", broadnervmr: "double") -> "bool":
return _sasktranif.ISKOpticalProperty_AddUserDefinedPressure(self, pressure, temperature, wavelen_nm, crosssection, broadnervmr)
# Register ISKOpticalProperty in _sasktranif:
_sasktranif.ISKOpticalProperty_swigregister(ISKOpticalProperty)
class ISKEmission(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, emissionname: "char const *"):
_sasktranif.ISKEmission_swiginit(self, _sasktranif.new_ISKEmission(emissionname))
__swig_destroy__ = _sasktranif.delete_ISKEmission
def Stub(self) -> "ISKEmission_Stub *":
return _sasktranif.ISKEmission_Stub(self)
def IsValidObject(self) -> "bool":
return _sasktranif.ISKEmission_IsValidObject(self)
def UpdateLocation(self, pt: "GEODETIC_INSTANT", isground: "bool") -> "bool":
return _sasktranif.ISKEmission_UpdateLocation(self, pt, isground)
def UpdateCache(self, pt: "GEODETIC_INSTANT") -> "bool":
return _sasktranif.ISKEmission_UpdateCache(self, pt)
def IsotropicEmission(self, wavenumber: "double const *") -> "bool":
return _sasktranif.ISKEmission_IsotropicEmission(self, wavenumber)
# Register ISKEmission in _sasktranif:
_sasktranif.ISKEmission_swigregister(ISKEmission)
class ISKBrdf(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, brdfname: "char const *"):
_sasktranif.ISKBrdf_swiginit(self, _sasktranif.new_ISKBrdf(brdfname))
__swig_destroy__ = _sasktranif.delete_ISKBrdf
def Stub(self) -> "ISKBrdf_Stub *":
return _sasktranif.ISKBrdf_Stub(self)
def IsValidObject(self) -> "bool":
return _sasktranif.ISKBrdf_IsValidObject(self)
def BRDF(self, wavelennm: "double", pt: "GEODETIC_INSTANT", MU_in: "double", MU_out: "double", COSDPHI: "double") -> "bool":
return _sasktranif.ISKBrdf_BRDF(self, wavelennm, pt, MU_in, MU_out, COSDPHI)
# Register ISKBrdf in _sasktranif:
_sasktranif.ISKBrdf_swigregister(ISKBrdf)
class ISKSolarSpectrum(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, solarspectrumname: "char const *"):
_sasktranif.ISKSolarSpectrum_swiginit(self, _sasktranif.new_ISKSolarSpectrum(solarspectrumname))
__swig_destroy__ = _sasktranif.delete_ISKSolarSpectrum
def IsValidObject(self) -> "bool":
return _sasktranif.ISKSolarSpectrum_IsValidObject(self)
def Irradiance(self, wavelen_nm_vacuum: "double const *") -> "bool":
return _sasktranif.ISKSolarSpectrum_Irradiance(self, wavelen_nm_vacuum)
def IrradianceAt1AU(self, wavelen_nm_vacuum: "double const *") -> "bool":
return _sasktranif.ISKSolarSpectrum_IrradianceAt1AU(self, wavelen_nm_vacuum)
def SetSolarDistanceFromMjd(self, mjd: "double") -> "bool":
return _sasktranif.ISKSolarSpectrum_SetSolarDistanceFromMjd(self, mjd)
def SetSolarDistanceFromAU(self, au: "double") -> "bool":
return _sasktranif.ISKSolarSpectrum_SetSolarDistanceFromAU(self, au)
def MinValidWavelength(self) -> "bool":
return _sasktranif.ISKSolarSpectrum_MinValidWavelength(self)
def MaxValidWavelength(self) -> "bool":
return _sasktranif.ISKSolarSpectrum_MaxValidWavelength(self)
def NanometerResolutionFWHM(self, wavelen_nm_vacuum: "double const *") -> "bool":
return _sasktranif.ISKSolarSpectrum_NanometerResolutionFWHM(self, wavelen_nm_vacuum)
def SampleSpacing(self, wavelen_nm_vacuum: "double const *") -> "bool":
return _sasktranif.ISKSolarSpectrum_SampleSpacing(self, wavelen_nm_vacuum)
# Register ISKSolarSpectrum in _sasktranif:
_sasktranif.ISKSolarSpectrum_swigregister(ISKSolarSpectrum)
class ISKStokesVectorIF(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
m_stokes = property(_sasktranif.ISKStokesVectorIF_m_stokes_get, _sasktranif.ISKStokesVectorIF_m_stokes_set)
__swig_destroy__ = _sasktranif.delete_ISKStokesVectorIF
def RawObjectUnknown(self) -> "nxUnknown *":
return _sasktranif.ISKStokesVectorIF_RawObjectUnknown(self)
def __init__(self):
_sasktranif.ISKStokesVectorIF_swiginit(self, _sasktranif.new_ISKStokesVectorIF())
# Register ISKStokesVectorIF in _sasktranif:
_sasktranif.ISKStokesVectorIF_swigregister(ISKStokesVectorIF)
class ISKEngine(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, enginename: "char const *"):
_sasktranif.ISKEngine_swiginit(self, _sasktranif.new_ISKEngine(enginename))
__swig_destroy__ = _sasktranif.delete_ISKEngine
def Stub(self) -> "ISKEngine_Stub *":
return _sasktranif.ISKEngine_Stub(self)
def IsValidObject(self) -> "bool":
return _sasktranif.ISKEngine_IsValidObject(self)
def AddLineOfSight(self, mjd: "double", observer: "nxVector", lookvector: "nxVector") -> "bool":
return _sasktranif.ISKEngine_AddLineOfSight(self, mjd, observer, lookvector)
def AddSpecies(self, climatology_handle_name: "char const *", climatology: "ISKClimatology", opticalproperty: "ISKOpticalProperty") -> "bool":
return _sasktranif.ISKEngine_AddSpecies(self, climatology_handle_name, climatology, opticalproperty)
def AddEmission(self, climatology_handle_name: "char const *", emission: "ISKEmission") -> "bool":
return _sasktranif.ISKEngine_AddEmission(self, climatology_handle_name, emission)
def SetAtmosphericState(self, climatology: "ISKClimatology") -> "bool":
return _sasktranif.ISKEngine_SetAtmosphericState(self, climatology)
def SetAlbedo(self, albedo: "double") -> "bool":
return _sasktranif.ISKEngine_SetAlbedo(self, albedo)
def SetBRDF(self, brdf: "ISKBrdf") -> "bool":
return _sasktranif.ISKEngine_SetBRDF(self, brdf)
def SetPolarizationMode(self, polarizationmode: "int") -> "bool":
return _sasktranif.ISKEngine_SetPolarizationMode(self, polarizationmode)
def SetWavelengths(self, wavelen: "double const *") -> "bool":
return _sasktranif.ISKEngine_SetWavelengths(self, wavelen)
def InitializeModel(self) -> "bool":
return _sasktranif.ISKEngine_InitializeModel(self)
def CalculateRadiance(self) -> "bool":
return _sasktranif.ISKEngine_CalculateRadiance(self)
def CalculateStokesVector(self) -> "bool":
return _sasktranif.ISKEngine_CalculateStokesVector(self)
def GetWeightingFunctions(self) -> "bool":
return _sasktranif.ISKEngine_GetWeightingFunctions(self)
def GetProperty(self, propertyname: "char const *") -> "bool":
return _sasktranif.ISKEngine_GetProperty(self, propertyname)
# Register ISKEngine in _sasktranif:
_sasktranif.ISKEngine_swigregister(ISKEngine)
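def _engine_example():
    # Hedged sketch (added; not part of the generated wrapper) of a typical
    # ISKEngine call sequence. The engine name 'HR', the numeric values, and
    # passing a Python list to SetWavelengths (via SWIG typemaps) are all
    # assumptions for demonstration, not confirmed by this file.
    engine = ISKEngine('HR')
    observer = nxVector()
    observer.FromLatLong(52.1, -106.6, 1.0)
    look = nxVector()
    look.FromLatLong(0.0, 0.0, 1.0)
    engine.AddLineOfSight(54372.5, observer, look)
    engine.SetWavelengths([350.0, 450.0])
    engine.InitializeModel()
    return engine.CalculateRadiance()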
class ISKGeodetic(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
_sasktranif.ISKGeodetic_swiginit(self, _sasktranif.new_ISKGeodetic())
__swig_destroy__ = _sasktranif.delete_ISKGeodetic
def IsValidObject(self) -> "bool":
return _sasktranif.ISKGeodetic_IsValidObject(self)
def SetLocationLatLonAlt(self, latitude: "double", longitude: "double", alt: "double") -> "bool":
return _sasktranif.ISKGeodetic_SetLocationLatLonAlt(self, latitude, longitude, alt)
def SetLocationXYZ(self, geocentric: "nxVector") -> "bool":
return _sasktranif.ISKGeodetic_SetLocationXYZ(self, geocentric)
def SetLocationFromTangentPoint(self, r: "nxVector", lookv: "nxVector") -> "bool":
return _sasktranif.ISKGeodetic_SetLocationFromTangentPoint(self, r, lookv)
def SetLocationFromTangentAltitude(self, requiredheight: "double", spacecraftlocation: "nxVector", boresightplane: "nxVector") -> "bool":
return _sasktranif.ISKGeodetic_SetLocationFromTangentAltitude(self, requiredheight, spacecraftlocation, boresightplane)
def GetLocalWest(self) -> "nxVector":
return _sasktranif.ISKGeodetic_GetLocalWest(self)
def GetLocalSouth(self) -> "nxVector":
return _sasktranif.ISKGeodetic_GetLocalSouth(self)
def GetLocalUp(self) -> "nxVector":
return _sasktranif.ISKGeodetic_GetLocalUp(self)
def GetLocationXYZ(self) -> "nxVector":
return _sasktranif.ISKGeodetic_GetLocationXYZ(self)
def GetLongitude(self) -> "double":
return _sasktranif.ISKGeodetic_GetLongitude(self)
def GetLatitude(self) -> "double":
return _sasktranif.ISKGeodetic_GetLatitude(self)
def GetAlt(self) -> "double":
return _sasktranif.ISKGeodetic_GetAlt(self)
def GetAltitudeIntercepts(self, H: "double", observerposition: "nxVector", look: "nxVector") -> "bool":
return _sasktranif.ISKGeodetic_GetAltitudeIntercepts(self, H, observerposition, look)
def GetOsculatingSpheroidCenter(self) -> "nxVector":
return _sasktranif.ISKGeodetic_GetOsculatingSpheroidCenter(self)
def GetOsculatingSpheroidRadius(self) -> "double":
return _sasktranif.ISKGeodetic_GetOsculatingSpheroidRadius(self)
# Register ISKGeodetic in _sasktranif:
_sasktranif.ISKGeodetic_swigregister(ISKGeodetic)
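def _geodetic_example():
    # Hedged sketch (added; not part of the generated wrapper): set a geodetic
    # location and read back derived quantities. Values are illustrative.
    geo = ISKGeodetic()
    geo.SetLocationLatLonAlt(52.1, -106.6, 0.0)
    return geo.GetLocationXYZ(), geo.GetLocalUp(), geo.GetAlt()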
class ISKMie(ISKModuleBase):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, name: "char const *"):
_sasktranif.ISKMie_swiginit(self, _sasktranif.new_ISKMie(name))
__swig_destroy__ = _sasktranif.delete_ISKMie
def Calculate(self, _lambda: "double", radius: "double", refrac_real: "double", refrac_imag: "double") -> "bool":
return _sasktranif.ISKMie_Calculate(self, _lambda, radius, refrac_real, refrac_imag)
def Qext(self) -> "double":
return _sasktranif.ISKMie_Qext(self)
def Qsca(self) -> "double":
return _sasktranif.ISKMie_Qsca(self)
def Qabs(self) -> "double":
return _sasktranif.ISKMie_Qabs(self)
def Cext(self) -> "double":
return _sasktranif.ISKMie_Cext(self)
def Csca(self) -> "double":
return _sasktranif.ISKMie_Csca(self)
def Cabs(self) -> "double":
return _sasktranif.ISKMie_Cabs(self)
def S1(self) -> "void":
return _sasktranif.ISKMie_S1(self)
def S2(self) -> "void":
return _sasktranif.ISKMie_S2(self)
def PMom(self) -> "void":
return _sasktranif.ISKMie_PMom(self)
def Sforward(self) -> "void":
return _sasktranif.ISKMie_Sforward(self)
def SBackward(self) -> "void":
return _sasktranif.ISKMie_SBackward(self)
def TForward(self, i: "int") -> "void":
return _sasktranif.ISKMie_TForward(self, i)
def TBackward(self, i: "int") -> "void":
return _sasktranif.ISKMie_TBackward(self, i)
def Spike(self) -> "double":
return _sasktranif.ISKMie_Spike(self)
# Register ISKMie in _sasktranif:
_sasktranif.ISKMie_swigregister(ISKMie)
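def _mie_example():
    # Hedged sketch (added; not part of the generated wrapper): one Mie
    # calculation for a water-like refractive index. The object name
    # 'MieWiscombe' and all parameter values are assumptions for
    # demonstration, not confirmed by this file.
    mie = ISKMie('MieWiscombe')
    mie.Calculate(550.0, 0.08, 1.33, 0.0)  # wavelength, radius, n_real, n_imag
    return mie.Qext(), mie.Qsca(), mie.Qabs()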
|
/sasktran-1.8.1-cp39-cp39-macosx_10_9_x86_64.whl/sasktranif/sasktranif.py
| 0.478285 | 0.207857 |
sasktranif.py
|
pypi
|
Version history
===============
.. py:currentmodule:: happybase
HappyBase 0.9
-------------
Release date: 2014-11-24
* Fix an issue where scanners would return fewer results than expected due to
HBase not always behaving as its documentation suggests (`issue #72
<https://github.com/wbolster/happybase/issues/72>`_).
* Add support for the Thrift compact protocol (``TCompactProtocol``) in
:py:class:`Connection` (`issue #70
<https://github.com/wbolster/happybase/issues/70>`_).
HappyBase 0.8
-------------
Release date: 2014-02-25
* Add (and default to) '0.96' compatibility mode in :py:class:`Connection`.
* Add support for retrieving sorted columns, which is possible with the HBase
0.96 Thrift API. This feature uses a new `sorted_columns` argument to
  :py:meth:`Table.scan`. An ``OrderedDict`` implementation is required for this
  feature; with Python 2.7 this is available from the standard library, but for
  Python 2.6 a separate ``ordereddict`` package has to be installed from PyPI.
(`issue #39 <https://github.com/wbolster/happybase/issues/39>`_)
* The `batch_size` argument to :py:meth:`Table.scan` is no longer propagated to
`Scan.setBatching()` at the Java side (inside the Thrift server). To influence
the `Scan.setBatching()` (which may split rows into partial rows) a new
`scan_batching` argument to :py:meth:`Table.scan` has been added. See `issue
#54 <https://github.com/wbolster/happybase/issues/54>`_, `issue #56
<https://github.com/wbolster/happybase/issues/56>`_, and the HBase docs for
`Scan.setBatching()` for more details.
HappyBase 0.7
-------------
Release date: 2013-11-06
* Added a `wal` argument to various data manipulation methods on the
:py:class:`Table` and :py:class:`Batch` classes to determine whether to write
the mutation to the Write-Ahead Log (WAL). (`issue #36
<https://github.com/wbolster/happybase/issues/36>`_)
* Pass batch_size to underlying Thrift Scan instance (`issue #38
<https://github.com/wbolster/happybase/issues/38>`_).
* Expose server name and port in :py:meth:`Table.regions` (recent HBase versions
only) (`issue #37 <https://github.com/wbolster/happybase/issues/37>`_).
* Regenerated bundled Thrift API modules using a recent upstream Thrift API
definition. This is required to expose newly added API.
HappyBase 0.6
-------------
Release date: 2013-06-12
* Rewrote exception handling in connection pool. Exception handling is now a lot
cleaner and does not introduce cyclic references anymore. (`issue #25
<https://github.com/wbolster/happybase/issues/25>`_).
* Regenerated bundled Thrift code using Thrift 0.9.0 with the new-style classes
flag (`issue #27 <https://github.com/wbolster/happybase/issues/27>`_).
HappyBase 0.5
-------------
Release date: 2013-05-24
* Added a thread-safe connection pool (:py:class:`ConnectionPool`) to keep
connections open and share them between threads (`issue #21
<https://github.com/wbolster/happybase/issues/21>`_).
* The :py:meth:`Connection.delete_table` method now features an optional
`disable` parameter to make deleting enabled tables easier.
* The debug log message emitted by :py:meth:`Table.scan` when closing a scanner
now includes both the number of rows returned to the calling code, and also
the number of rows actually fetched from the server. If scanners are not
completely iterated over (e.g. because of a 'break' statement in the for loop
for the scanner), these numbers may differ. If this happens often, and the
differences are big, this may be a hint that the `batch_size` parameter to
  :py:meth:`Table.scan` is not optimal for your application.
* Increased Thrift dependency to at least 0.8. Older versions are no longer
available from PyPI. HappyBase should not be used with obsoleted Thrift
versions.
* The :py:class:`Connection` constructor now features an optional `timeout`
  parameter to specify the timeout to use for the Thrift socket (`issue #15
<https://github.com/wbolster/happybase/issues/15>`_)
* The `timestamp` argument to various methods now also accepts `long` values in
addition to `int` values. This fixes problems with large timestamp values on
32-bit systems. (`issue #23
<https://github.com/wbolster/happybase/issues/23>`_).
* In some corner cases exceptions were raised during interpreter shutdown while
closing any remaining open connections. (`issue #18
<https://github.com/wbolster/happybase/issues/18>`_)
HappyBase 0.4
-------------
Release date: 2012-07-11
* Add an optional `table_prefix_separator` argument to the
:py:class:`Connection` constructor, to specify the prefix used for the
`table_prefix` argument (`issue #3
<https://github.com/wbolster/happybase/issues/3>`_)
* Add support for framed Thrift transports using a new optional `transport`
argument to :py:class:`Connection` (`issue #6
<https://github.com/wbolster/happybase/issues/6>`_)
* Add the Apache license conditions in the :doc:`license statement <license>`
(for the included HBase parts)
* Documentation improvements
HappyBase 0.3
-------------
Release date: 2012-05-25
New features:
* Improved compatibility with HBase 0.90.x
* In earlier versions, using :py:meth:`Table.scan` in combination with HBase
0.90.x often resulted in crashes, caused by incompatibilities in the
underlying Thrift protocol.
* A new `compat` flag to the :py:class:`Connection` constructor has been
added to enable compatibility with HBase 0.90.x.
* Note that the :py:meth:`Table.scan` API has a few limitations when used
with HBase 0.90.x.
* The `row_prefix` argument to :py:meth:`Table.scan` can now be used together
with `filter` and `timestamp` arguments.
Other changes:
* Lower Thrift dependency to 0.6
* The `setup.py` script no longer installs the tests
* Documentation improvements
HappyBase 0.2
-------------
Release date: 2012-05-22
* Fix package installation, so that ``pip install happybase`` works as expected
(`issue #1 <https://github.com/wbolster/happybase/issues/1>`_)
* Various small documentation improvements
HappyBase 0.1
-------------
Release date: 2012-05-20
* Initial release
|
/sasl-happybase-1.0.tar.gz/sasl-happybase-1.0/NEWS.rst
| 0.924777 | 0.653272 |
NEWS.rst
|
pypi
|
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
    from thrift.protocol import fastbinary
except ImportError:
    fastbinary = None
class TCell(object):
"""
TCell - Used to transport a cell value (byte[]) and the timestamp it was
stored with together as a result for get and getRow methods. This promotes
the timestamp of a cell to a first-class value, making it easy to take
note of temporal data. Cell is used all the way from HStore up to HTable.
Attributes:
- value
- timestamp
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'value', None, None, ), # 1
(2, TType.I64, 'timestamp', None, None, ), # 2
)
def __init__(self, value=None, timestamp=None,):
self.value = value
self.timestamp = timestamp
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TCell')
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 1)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 2)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
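# --- illustration (added; not part of the generated module) ---
# A hedged sketch of round-tripping a TCell through the binary protocol with
# an in-memory transport; the cell value and timestamp are made up.
def _tcell_roundtrip_example():
    buf = TTransport.TMemoryBuffer()
    cell = TCell(value='v1', timestamp=1394500000000)
    cell.write(TBinaryProtocol.TBinaryProtocol(buf))
    decoded = TCell()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    return decoded == cell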
class ColumnDescriptor(object):
"""
An HColumnDescriptor contains information about a column family
such as the number of versions, compression settings, etc. It is
used as input when creating a table or adding a column.
Attributes:
- name
- maxVersions
- compression
- inMemory
- bloomFilterType
- bloomFilterVectorSize
- bloomFilterNbHashes
- blockCacheEnabled
- timeToLive
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'maxVersions', None, 3, ), # 2
(3, TType.STRING, 'compression', None, "NONE", ), # 3
(4, TType.BOOL, 'inMemory', None, False, ), # 4
(5, TType.STRING, 'bloomFilterType', None, "NONE", ), # 5
(6, TType.I32, 'bloomFilterVectorSize', None, 0, ), # 6
(7, TType.I32, 'bloomFilterNbHashes', None, 0, ), # 7
(8, TType.BOOL, 'blockCacheEnabled', None, False, ), # 8
(9, TType.I32, 'timeToLive', None, -1, ), # 9
)
def __init__(self, name=None, maxVersions=thrift_spec[2][4], compression=thrift_spec[3][4], inMemory=thrift_spec[4][4], bloomFilterType=thrift_spec[5][4], bloomFilterVectorSize=thrift_spec[6][4], bloomFilterNbHashes=thrift_spec[7][4], blockCacheEnabled=thrift_spec[8][4], timeToLive=thrift_spec[9][4],):
self.name = name
self.maxVersions = maxVersions
self.compression = compression
self.inMemory = inMemory
self.bloomFilterType = bloomFilterType
self.bloomFilterVectorSize = bloomFilterVectorSize
self.bloomFilterNbHashes = bloomFilterNbHashes
self.blockCacheEnabled = blockCacheEnabled
self.timeToLive = timeToLive
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.maxVersions = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.compression = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.inMemory = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.bloomFilterType = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.bloomFilterVectorSize = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.bloomFilterNbHashes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.blockCacheEnabled = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.timeToLive = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ColumnDescriptor')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.maxVersions is not None:
oprot.writeFieldBegin('maxVersions', TType.I32, 2)
oprot.writeI32(self.maxVersions)
oprot.writeFieldEnd()
if self.compression is not None:
oprot.writeFieldBegin('compression', TType.STRING, 3)
oprot.writeString(self.compression)
oprot.writeFieldEnd()
if self.inMemory is not None:
oprot.writeFieldBegin('inMemory', TType.BOOL, 4)
oprot.writeBool(self.inMemory)
oprot.writeFieldEnd()
if self.bloomFilterType is not None:
oprot.writeFieldBegin('bloomFilterType', TType.STRING, 5)
oprot.writeString(self.bloomFilterType)
oprot.writeFieldEnd()
if self.bloomFilterVectorSize is not None:
oprot.writeFieldBegin('bloomFilterVectorSize', TType.I32, 6)
oprot.writeI32(self.bloomFilterVectorSize)
oprot.writeFieldEnd()
if self.bloomFilterNbHashes is not None:
oprot.writeFieldBegin('bloomFilterNbHashes', TType.I32, 7)
oprot.writeI32(self.bloomFilterNbHashes)
oprot.writeFieldEnd()
if self.blockCacheEnabled is not None:
oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 8)
oprot.writeBool(self.blockCacheEnabled)
oprot.writeFieldEnd()
if self.timeToLive is not None:
oprot.writeFieldBegin('timeToLive', TType.I32, 9)
oprot.writeI32(self.timeToLive)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRegionInfo(object):
"""
A TRegionInfo contains information about an HTable region.
Attributes:
- startKey
- endKey
- id
- name
- version
- serverName
- port
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'startKey', None, None, ), # 1
(2, TType.STRING, 'endKey', None, None, ), # 2
(3, TType.I64, 'id', None, None, ), # 3
(4, TType.STRING, 'name', None, None, ), # 4
(5, TType.BYTE, 'version', None, None, ), # 5
(6, TType.STRING, 'serverName', None, None, ), # 6
(7, TType.I32, 'port', None, None, ), # 7
)
def __init__(self, startKey=None, endKey=None, id=None, name=None, version=None, serverName=None, port=None,):
self.startKey = startKey
self.endKey = endKey
self.id = id
self.name = name
self.version = version
self.serverName = serverName
self.port = port
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.startKey = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.endKey = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.id = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BYTE:
self.version = iprot.readByte();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.serverName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.port = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TRegionInfo')
if self.startKey is not None:
oprot.writeFieldBegin('startKey', TType.STRING, 1)
oprot.writeString(self.startKey)
oprot.writeFieldEnd()
if self.endKey is not None:
oprot.writeFieldBegin('endKey', TType.STRING, 2)
oprot.writeString(self.endKey)
oprot.writeFieldEnd()
if self.id is not None:
oprot.writeFieldBegin('id', TType.I64, 3)
oprot.writeI64(self.id)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 4)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.version is not None:
oprot.writeFieldBegin('version', TType.BYTE, 5)
oprot.writeByte(self.version)
oprot.writeFieldEnd()
if self.serverName is not None:
oprot.writeFieldBegin('serverName', TType.STRING, 6)
oprot.writeString(self.serverName)
oprot.writeFieldEnd()
if self.port is not None:
oprot.writeFieldBegin('port', TType.I32, 7)
oprot.writeI32(self.port)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Mutation(object):
"""
A Mutation object is used to either update or delete a column-value.
Attributes:
- isDelete
- column
- value
- writeToWAL
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'isDelete', None, False, ), # 1
(2, TType.STRING, 'column', None, None, ), # 2
(3, TType.STRING, 'value', None, None, ), # 3
(4, TType.BOOL, 'writeToWAL', None, True, ), # 4
)
def __init__(self, isDelete=thrift_spec[1][4], column=None, value=None, writeToWAL=thrift_spec[4][4],):
self.isDelete = isDelete
self.column = column
self.value = value
self.writeToWAL = writeToWAL
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.isDelete = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.writeToWAL = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Mutation')
if self.isDelete is not None:
oprot.writeFieldBegin('isDelete', TType.BOOL, 1)
oprot.writeBool(self.isDelete)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 2)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 3)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.writeToWAL is not None:
oprot.writeFieldBegin('writeToWAL', TType.BOOL, 4)
oprot.writeBool(self.writeToWAL)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
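# Usage sketch (hypothetical helper, not part of the generated API): a
# Mutation with isDelete=False writes ``value`` into ``column``; with
# isDelete=True it deletes the column instead. writeToWAL defaults to True
# via thrift_spec, trading durability for speed when set to False.
def _example_put_mutation(column, value):
  return Mutation(isDelete=False, column=column, value=value)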
class BatchMutation(object):
"""
A BatchMutation object is used to apply a number of Mutations to a single row.
Attributes:
- row
- mutations
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'row', None, None, ), # 1
(2, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 2
)
def __init__(self, row=None, mutations=None,):
self.row = row
self.mutations = mutations
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.mutations = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = Mutation()
_elem5.read(iprot)
self.mutations.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BatchMutation')
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 1)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.mutations is not None:
oprot.writeFieldBegin('mutations', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.mutations))
for iter6 in self.mutations:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
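# Usage sketch (hypothetical helper, not part of the generated API): group
# several (column, value) updates on one row into the BatchMutation shape
# expected by mutateRows()/mutateRowsTs().
def _example_batch_mutation(row, updates):
  muts = [Mutation(isDelete=False, column=c, value=v) for c, v in updates]
  return BatchMutation(row=row, mutations=muts)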
class TIncrement(object):
"""
For increments that are not incrementColumnValue
equivalents.
Attributes:
- table
- row
- column
- ammount
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'ammount', None, None, ), # 4
)
def __init__(self, table=None, row=None, column=None, ammount=None,):
self.table = table
self.row = row
self.column = column
self.ammount = ammount
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.ammount = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TIncrement')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.ammount is not None:
oprot.writeFieldBegin('ammount', TType.I64, 4)
oprot.writeI64(self.ammount)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
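# Usage sketch (hypothetical helper, not part of the generated API): a
# TIncrement bundles the full coordinates of one counter bump for
# increment()/incrementRows(). Note the amount field is spelled
# ``ammount`` in the upstream IDL, so that spelling must be used here.
def _example_increment(table, row, column, by=1):
  return TIncrement(table=table, row=row, column=column, ammount=by)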
class TColumn(object):
"""
Holds column name and the cell.
Attributes:
- columnName
- cell
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'columnName', None, None, ), # 1
(2, TType.STRUCT, 'cell', (TCell, TCell.thrift_spec), None, ), # 2
)
def __init__(self, columnName=None, cell=None,):
self.columnName = columnName
self.cell = cell
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.columnName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.cell = TCell()
self.cell.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TColumn')
if self.columnName is not None:
oprot.writeFieldBegin('columnName', TType.STRING, 1)
oprot.writeString(self.columnName)
oprot.writeFieldEnd()
if self.cell is not None:
oprot.writeFieldBegin('cell', TType.STRUCT, 2)
self.cell.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRowResult(object):
"""
Holds row name and then a map of columns to cells.
Attributes:
- row
- columns
- sortedColumns
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'row', None, None, ), # 1
(2, TType.MAP, 'columns', (TType.STRING,None,TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 2
(3, TType.LIST, 'sortedColumns', (TType.STRUCT,(TColumn, TColumn.thrift_spec)), None, ), # 3
)
def __init__(self, row=None, columns=None, sortedColumns=None,):
self.row = row
self.columns = columns
self.sortedColumns = sortedColumns
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.columns = {}
(_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
for _i11 in xrange(_size7):
_key12 = iprot.readString();
_val13 = TCell()
_val13.read(iprot)
self.columns[_key12] = _val13
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.sortedColumns = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = TColumn()
_elem19.read(iprot)
self.sortedColumns.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TRowResult')
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 1)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.columns))
for kiter20,viter21 in self.columns.items():
oprot.writeString(kiter20)
viter21.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.sortedColumns is not None:
oprot.writeFieldBegin('sortedColumns', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.sortedColumns))
for iter22 in self.sortedColumns:
iter22.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
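# Usage sketch (hypothetical helper, not part of the generated API):
# flatten a TRowResult into a plain {column: value} dict, discarding the
# timestamp carried by each TCell.
def _example_row_to_dict(row_result):
  return dict((name, cell.value)
              for name, cell in (row_result.columns or {}).items())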
class TScan(object):
"""
A Scan object is used to specify scanner parameters when opening a scanner.
Attributes:
- startRow
- stopRow
- timestamp
- columns
- caching
- filterString
- batchSize
- sortColumns
- reversed
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'startRow', None, None, ), # 1
(2, TType.STRING, 'stopRow', None, None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
(5, TType.I32, 'caching', None, None, ), # 5
(6, TType.STRING, 'filterString', None, None, ), # 6
(7, TType.I32, 'batchSize', None, None, ), # 7
(8, TType.BOOL, 'sortColumns', None, None, ), # 8
(9, TType.BOOL, 'reversed', None, None, ), # 9
)
def __init__(self, startRow=None, stopRow=None, timestamp=None, columns=None, caching=None, filterString=None, batchSize=None, sortColumns=None, reversed=None,):
self.startRow = startRow
self.stopRow = stopRow
self.timestamp = timestamp
self.columns = columns
self.caching = caching
self.filterString = filterString
self.batchSize = batchSize
self.sortColumns = sortColumns
self.reversed = reversed
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stopRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.columns = []
(_etype26, _size23) = iprot.readListBegin()
for _i27 in xrange(_size23):
_elem28 = iprot.readString();
self.columns.append(_elem28)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.caching = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.filterString = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.batchSize = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.sortColumns = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.reversed = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TScan')
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 1)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.stopRow is not None:
oprot.writeFieldBegin('stopRow', TType.STRING, 2)
oprot.writeString(self.stopRow)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter29 in self.columns:
oprot.writeString(iter29)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.caching is not None:
oprot.writeFieldBegin('caching', TType.I32, 5)
oprot.writeI32(self.caching)
oprot.writeFieldEnd()
if self.filterString is not None:
oprot.writeFieldBegin('filterString', TType.STRING, 6)
oprot.writeString(self.filterString)
oprot.writeFieldEnd()
if self.batchSize is not None:
oprot.writeFieldBegin('batchSize', TType.I32, 7)
oprot.writeI32(self.batchSize)
oprot.writeFieldEnd()
if self.sortColumns is not None:
oprot.writeFieldBegin('sortColumns', TType.BOOL, 8)
oprot.writeBool(self.sortColumns)
oprot.writeFieldEnd()
if self.reversed is not None:
oprot.writeFieldBegin('reversed', TType.BOOL, 9)
oprot.writeBool(self.reversed)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
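# Usage sketch (hypothetical helper, not part of the generated API): a
# half-open [start, stop) scan over the given columns, fetching
# ``caching`` rows per round trip; pass the result to scannerOpenWithScan().
def _example_scan(start, stop, columns=None, caching=1000):
  return TScan(startRow=start, stopRow=stop, columns=columns,
               caching=caching)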
class TAppend(object):
"""
An Append object is used to specify the parameters for performing the append operation.
Attributes:
- table
- row
- columns
- values
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'table', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.LIST, 'values', (TType.STRING,None), None, ), # 4
)
def __init__(self, table=None, row=None, columns=None, values=None,):
self.table = table
self.row = row
self.columns = columns
self.values = values
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype33, _size30) = iprot.readListBegin()
for _i34 in xrange(_size30):
_elem35 = iprot.readString();
self.columns.append(_elem35)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.values = []
(_etype39, _size36) = iprot.readListBegin()
for _i40 in xrange(_size36):
_elem41 = iprot.readString();
self.values.append(_elem41)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAppend')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter42 in self.columns:
oprot.writeString(iter42)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.values))
for iter43 in self.values:
oprot.writeString(iter43)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
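# Usage sketch (hypothetical helper, not part of the generated API):
# columns[i] receives values[i], so the two lists must stay parallel;
# pass the struct to append().
def _example_append(table, row, pairs):
  cols = [c for c, _ in pairs]
  vals = [v for _, v in pairs]
  return TAppend(table=table, row=row, columns=cols, values=vals)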
class IOError(TException):
"""
An IOError exception signals that an error occurred communicating
to the Hbase master or an Hbase region server. Also used to return
more general Hbase error conditions.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('IOError')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class IllegalArgument(TException):
"""
An IllegalArgument exception indicates an illegal or invalid
argument was passed into a procedure.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('IllegalArgument')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AlreadyExists(TException):
"""
  An AlreadyExists exception signals that a table with the specified
  name already exists.
Attributes:
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
)
def __init__(self, message=None,):
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AlreadyExists')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# --- end of generated module happybase/hbase/ttypes.py (sasl-happybase-1.0) ---
# --- companion generated Thrift service module (Iface/Client) follows ---
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
  from thrift.protocol import fastbinary
except ImportError:
  # Optional C accelerator; without it, read()/write() fall back to the
  # pure-Python (de)serialization paths below.
  fastbinary = None
class Iface(object):
def enableTable(self, tableName):
"""
Brings a table on-line (enables it)
Parameters:
- tableName: name of the table
"""
pass
def disableTable(self, tableName):
"""
    Disables a table (takes it off-line). If it is being served, the master
    will tell the servers to stop serving it.
Parameters:
- tableName: name of the table
"""
pass
def isTableEnabled(self, tableName):
"""
@return true if table is on-line
Parameters:
- tableName: name of the table to check
"""
pass
def compact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
pass
def majorCompact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
pass
def getTableNames(self, ):
"""
List all the userspace tables.
@return returns a list of names
"""
pass
def getColumnDescriptors(self, tableName):
"""
    List all the column families associated with a table.
@return list of column family descriptors
Parameters:
- tableName: table name
"""
pass
def getTableRegions(self, tableName):
"""
List the regions associated with a table.
@return list of region descriptors
Parameters:
- tableName: table name
"""
pass
def createTable(self, tableName, columnFamilies):
"""
Create a table with the specified column families. The name
field for each ColumnDescriptor must be set and must end in a
colon (:). All other fields are optional and will get default
values if not explicitly specified.
@throws IllegalArgument if an input parameter is invalid
@throws AlreadyExists if the table name already exists
Parameters:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
pass
def deleteTable(self, tableName):
"""
Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete
"""
pass
def get(self, tableName, row, column, attributes):
"""
Get a single TCell for the specified table, row, and column at the
latest timestamp. Returns an empty list if no such value exists.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
pass
def getVer(self, tableName, row, column, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
pass
def getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column. Only versions less than or equal to the specified
timestamp will be returned.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
pass
def getRow(self, tableName, row, attributes):
"""
Get all the data for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
pass
def getRowWithColumns(self, tableName, row, columns, attributes):
"""
Get the specified columns for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
pass
def getRowTs(self, tableName, row, timestamp, attributes):
"""
Get all the data for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
pass
def getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- timestamp
- attributes: Get attributes
"""
pass
def getRows(self, tableName, rows, attributes):
"""
Get all the data for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
pass
def getRowsWithColumns(self, tableName, rows, columns, attributes):
"""
Get the specified columns for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
pass
def getRowsTs(self, tableName, rows, timestamp, attributes):
"""
Get all the data for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
pass
def getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- timestamp
- attributes: Get attributes
"""
pass
def mutateRow(self, tableName, row, mutations, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. Default current timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
pass
def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
pass
def mutateRows(self, tableName, rowBatches, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. Default current timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
pass
def mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
pass
def atomicIncrement(self, tableName, row, column, value):
"""
Atomically increment the column value specified. Returns the next value post increment.
Parameters:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
pass
def deleteAll(self, tableName, row, column, attributes):
"""
Delete all cells that match the passed row and column.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
pass
def deleteAllTs(self, tableName, row, column, timestamp, attributes):
"""
Delete all cells that match the passed row and column and whose
timestamp is equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- timestamp: timestamp
- attributes: Delete attributes
"""
pass
def deleteAllRow(self, tableName, row, attributes):
"""
Completely delete the row's cells.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
pass
def increment(self, increment):
"""
    Increment a cell by the specified amount.
    Increments can be applied asynchronously if
    hbase.regionserver.thrift.coalesceIncrement is set to true (false is the
    default). Set it to true if you need the extra performance and can accept
    some data loss if a Thrift server dies with increments still queued.
Parameters:
- increment: The single increment to apply
"""
pass
def incrementRows(self, increments):
"""
Parameters:
- increments: The list of increments
"""
pass
def deleteAllRowTs(self, tableName, row, timestamp, attributes):
"""
Completely delete the row's cells marked with a timestamp
equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: timestamp
- attributes: Delete attributes
"""
pass
def scannerOpenWithScan(self, tableName, scan, attributes):
"""
Get a scanner on the current table, using the Scan instance
for the scan parameters.
Parameters:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
pass
def scannerOpen(self, tableName, startRow, columns, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
pass
def scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
"""
    Get a scanner on the current table starting and stopping at the
    specified rows. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
pass
def scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
"""
    Open a scanner for a given prefix. That is, all rows will have the
    specified prefix. No other rows will be returned.
@return scanner id to use with other scanner calls
Parameters:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
pass
def scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
Only values with the specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
pass
def scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
"""
    Get a scanner on the current table starting and stopping at the
    specified rows. Return the specified columns. Only values with the
    specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
pass
def scannerGet(self, id):
"""
Returns the scanner's current row value and advances to the next
row in the table. When there are no more rows in the table, or a key
greater-than-or-equal-to the scanner's specified stopRow is reached,
an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
"""
pass
def scannerGetList(self, id, nbRows):
"""
Returns, starting at the scanner's current row value nbRows worth of
rows and advances to the next row in the table. When there are no more
rows in the table, or a key greater-than-or-equal-to the scanner's
specified stopRow is reached, an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
pass
def scannerClose(self, id):
"""
Closes the server-state associated with an open scanner.
@throws IllegalArgument if ScannerID is invalid
Parameters:
- id: id of a scanner returned by scannerOpen
"""
pass
def getRowOrBefore(self, tableName, row, family):
"""
Get the row just before the specified one.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- family: column name
"""
pass
def getRegionInfo(self, row):
"""
    Get the region info for the specified row. It scans
    the meta table to find the region's start and end keys.
    @return TRegionInfo for the region containing the specified row
Parameters:
- row: row key
"""
pass
def append(self, append):
"""
Appends values to one or more columns within a single row.
@return values of columns after the append operation.
Parameters:
- append: The single append operation to apply
"""
pass
def checkAndPut(self, tableName, row, column, value, mput, attributes):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the corresponding mutation operation for put.
@return true if the new put was executed, false otherwise
Parameters:
- tableName: name of table
- row: row key
- column: column name
- value: the expected value for the column parameter, if not
provided the check is for the non-existence of the
column in question
- mput: mutation for the put
- attributes: Mutation attributes
"""
pass
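# Usage sketch (hypothetical helper, not part of the generated API; a plain
# non-SASL Thrift server is assumed): wire a Client to a server with the
# standard thrift transport/protocol stack. TSocket is imported lazily so
# the module keeps its original import surface.
def _example_open_client(host='localhost', port=9090):
  from thrift.transport import TSocket
  transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
  protocol = TBinaryProtocol.TBinaryProtocol(transport)
  transport.open()
  return Client(protocol)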
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def enableTable(self, tableName):
"""
Brings a table on-line (enables it)
Parameters:
- tableName: name of the table
"""
self.send_enableTable(tableName)
self.recv_enableTable()
def send_enableTable(self, tableName):
self._oprot.writeMessageBegin('enableTable', TMessageType.CALL, self._seqid)
args = enableTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_enableTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = enableTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
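  # Every RPC below follows this same synchronous shape: send_<name>()
  # serializes a <name>_args struct and flushes the transport, then
  # recv_<name>() reads back either a TApplicationException or a
  # <name>_result struct and re-raises any declared exception (e.g. io).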
def disableTable(self, tableName):
"""
Disables a table (takes it off-line) If it is being served, the master
will tell the servers to stop serving it.
Parameters:
- tableName: name of the table
"""
self.send_disableTable(tableName)
self.recv_disableTable()
def send_disableTable(self, tableName):
self._oprot.writeMessageBegin('disableTable', TMessageType.CALL, self._seqid)
args = disableTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_disableTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = disableTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def isTableEnabled(self, tableName):
"""
@return true if table is on-line
Parameters:
- tableName: name of the table to check
"""
self.send_isTableEnabled(tableName)
return self.recv_isTableEnabled()
def send_isTableEnabled(self, tableName):
self._oprot.writeMessageBegin('isTableEnabled', TMessageType.CALL, self._seqid)
args = isTableEnabled_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_isTableEnabled(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = isTableEnabled_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "isTableEnabled failed: unknown result");
def compact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
self.send_compact(tableNameOrRegionName)
self.recv_compact()
def send_compact(self, tableNameOrRegionName):
self._oprot.writeMessageBegin('compact', TMessageType.CALL, self._seqid)
args = compact_args()
args.tableNameOrRegionName = tableNameOrRegionName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_compact(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = compact_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def majorCompact(self, tableNameOrRegionName):
"""
Parameters:
- tableNameOrRegionName
"""
self.send_majorCompact(tableNameOrRegionName)
self.recv_majorCompact()
def send_majorCompact(self, tableNameOrRegionName):
self._oprot.writeMessageBegin('majorCompact', TMessageType.CALL, self._seqid)
args = majorCompact_args()
args.tableNameOrRegionName = tableNameOrRegionName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_majorCompact(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = majorCompact_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def getTableNames(self, ):
"""
List all the userspace tables.
@return returns a list of names
"""
self.send_getTableNames()
return self.recv_getTableNames()
def send_getTableNames(self, ):
self._oprot.writeMessageBegin('getTableNames', TMessageType.CALL, self._seqid)
args = getTableNames_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTableNames(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTableNames_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableNames failed: unknown result");
def getColumnDescriptors(self, tableName):
"""
    List all the column families associated with a table.
@return list of column family descriptors
Parameters:
- tableName: table name
"""
self.send_getColumnDescriptors(tableName)
return self.recv_getColumnDescriptors()
def send_getColumnDescriptors(self, tableName):
self._oprot.writeMessageBegin('getColumnDescriptors', TMessageType.CALL, self._seqid)
args = getColumnDescriptors_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getColumnDescriptors(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getColumnDescriptors_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getColumnDescriptors failed: unknown result");
def getTableRegions(self, tableName):
"""
List the regions associated with a table.
@return list of region descriptors
Parameters:
- tableName: table name
"""
self.send_getTableRegions(tableName)
return self.recv_getTableRegions()
def send_getTableRegions(self, tableName):
self._oprot.writeMessageBegin('getTableRegions', TMessageType.CALL, self._seqid)
args = getTableRegions_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTableRegions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTableRegions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTableRegions failed: unknown result");
def createTable(self, tableName, columnFamilies):
"""
Create a table with the specified column families. The name
field for each ColumnDescriptor must be set and must end in a
colon (:). All other fields are optional and will get default
values if not explicitly specified.
@throws IllegalArgument if an input parameter is invalid
@throws AlreadyExists if the table name already exists
Parameters:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
self.send_createTable(tableName, columnFamilies)
self.recv_createTable()
def send_createTable(self, tableName, columnFamilies):
self._oprot.writeMessageBegin('createTable', TMessageType.CALL, self._seqid)
args = createTable_args()
args.tableName = tableName
args.columnFamilies = columnFamilies
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = createTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
if result.exist is not None:
raise result.exist
return
def deleteTable(self, tableName):
"""
Deletes a table
@throws IOError if table doesn't exist on server or there was some other
problem
Parameters:
- tableName: name of table to delete
"""
self.send_deleteTable(tableName)
self.recv_deleteTable()
def send_deleteTable(self, tableName):
self._oprot.writeMessageBegin('deleteTable', TMessageType.CALL, self._seqid)
args = deleteTable_args()
args.tableName = tableName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteTable(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def get(self, tableName, row, column, attributes):
"""
Get a single TCell for the specified table, row, and column at the
latest timestamp. Returns an empty list if no such value exists.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
self.send_get(tableName, row, column, attributes)
return self.recv_get()
def send_get(self, tableName, row, column, attributes):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.tableName = tableName
args.row = row
args.column = column
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result");
def getVer(self, tableName, row, column, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
self.send_getVer(tableName, row, column, numVersions, attributes)
return self.recv_getVer()
def send_getVer(self, tableName, row, column, numVersions, attributes):
self._oprot.writeMessageBegin('getVer', TMessageType.CALL, self._seqid)
args = getVer_args()
args.tableName = tableName
args.row = row
args.column = column
args.numVersions = numVersions
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVer(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVer_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVer failed: unknown result");
def getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
"""
Get the specified number of versions for the specified table,
row, and column. Only versions less than or equal to the specified
timestamp will be returned.
@return list of cells for specified row/column
Parameters:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
self.send_getVerTs(tableName, row, column, timestamp, numVersions, attributes)
return self.recv_getVerTs()
def send_getVerTs(self, tableName, row, column, timestamp, numVersions, attributes):
self._oprot.writeMessageBegin('getVerTs', TMessageType.CALL, self._seqid)
args = getVerTs_args()
args.tableName = tableName
args.row = row
args.column = column
args.timestamp = timestamp
args.numVersions = numVersions
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getVerTs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getVerTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getVerTs failed: unknown result");
def getRow(self, tableName, row, attributes):
"""
Get all the data for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
self.send_getRow(tableName, row, attributes)
return self.recv_getRow()
def send_getRow(self, tableName, row, attributes):
self._oprot.writeMessageBegin('getRow', TMessageType.CALL, self._seqid)
args = getRow_args()
args.tableName = tableName
args.row = row
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRow(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRow failed: unknown result");
def getRowWithColumns(self, tableName, row, columns, attributes):
"""
Get the specified columns for the specified table and row at the latest
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
self.send_getRowWithColumns(tableName, row, columns, attributes)
return self.recv_getRowWithColumns()
def send_getRowWithColumns(self, tableName, row, columns, attributes):
self._oprot.writeMessageBegin('getRowWithColumns', TMessageType.CALL, self._seqid)
args = getRowWithColumns_args()
args.tableName = tableName
args.row = row
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowWithColumns(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowWithColumns_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowWithColumns failed: unknown result");
def getRowTs(self, tableName, row, timestamp, attributes):
"""
Get all the data for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowTs(tableName, row, timestamp, attributes)
return self.recv_getRowTs()
def send_getRowTs(self, tableName, row, timestamp, attributes):
self._oprot.writeMessageBegin('getRowTs', TMessageType.CALL, self._seqid)
args = getRowTs_args()
args.tableName = tableName
args.row = row
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowTs failed: unknown result");
def getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and row at the specified
timestamp. Returns an empty list if the row does not exist.
@return TRowResult containing the row and map of columns to TCells
Parameters:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowWithColumnsTs(tableName, row, columns, timestamp, attributes)
return self.recv_getRowWithColumnsTs()
def send_getRowWithColumnsTs(self, tableName, row, columns, timestamp, attributes):
self._oprot.writeMessageBegin('getRowWithColumnsTs', TMessageType.CALL, self._seqid)
args = getRowWithColumnsTs_args()
args.tableName = tableName
args.row = row
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowWithColumnsTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowWithColumnsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowWithColumnsTs failed: unknown result");
def getRows(self, tableName, rows, attributes):
"""
Get all the data for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
self.send_getRows(tableName, rows, attributes)
return self.recv_getRows()
def send_getRows(self, tableName, rows, attributes):
self._oprot.writeMessageBegin('getRows', TMessageType.CALL, self._seqid)
args = getRows_args()
args.tableName = tableName
args.rows = rows
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRows(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRows failed: unknown result");
def getRowsWithColumns(self, tableName, rows, columns, attributes):
"""
Get the specified columns for the specified table and rows at the latest
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
self.send_getRowsWithColumns(tableName, rows, columns, attributes)
return self.recv_getRowsWithColumns()
def send_getRowsWithColumns(self, tableName, rows, columns, attributes):
self._oprot.writeMessageBegin('getRowsWithColumns', TMessageType.CALL, self._seqid)
args = getRowsWithColumns_args()
args.tableName = tableName
args.rows = rows
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsWithColumns(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsWithColumns_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsWithColumns failed: unknown result");
def getRowsTs(self, tableName, rows, timestamp, attributes):
"""
Get all the data for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowsTs(tableName, rows, timestamp, attributes)
return self.recv_getRowsTs()
def send_getRowsTs(self, tableName, rows, timestamp, attributes):
self._oprot.writeMessageBegin('getRowsTs', TMessageType.CALL, self._seqid)
args = getRowsTs_args()
args.tableName = tableName
args.rows = rows
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsTs failed: unknown result");
def getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
"""
Get the specified columns for the specified table and rows at the specified
timestamp. Returns an empty list if no rows exist.
@return TRowResult containing the rows and map of columns to TCells
Parameters:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- timestamp: timestamp
- attributes: Get attributes
"""
self.send_getRowsWithColumnsTs(tableName, rows, columns, timestamp, attributes)
return self.recv_getRowsWithColumnsTs()
def send_getRowsWithColumnsTs(self, tableName, rows, columns, timestamp, attributes):
self._oprot.writeMessageBegin('getRowsWithColumnsTs', TMessageType.CALL, self._seqid)
args = getRowsWithColumnsTs_args()
args.tableName = tableName
args.rows = rows
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowsWithColumnsTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowsWithColumnsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowsWithColumnsTs failed: unknown result");
def mutateRow(self, tableName, row, mutations, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The current timestamp is used by default, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
self.send_mutateRow(tableName, row, mutations, attributes)
self.recv_mutateRow()
def send_mutateRow(self, tableName, row, mutations, attributes):
self._oprot.writeMessageBegin('mutateRow', TMessageType.CALL, self._seqid)
args = mutateRow_args()
args.tableName = tableName
args.row = row
args.mutations = mutations
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRow(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
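# A single-row write sketch, assuming `client` was built as in the first
# sketch and that Mutation comes from this service's generated ttypes module
# (the exact import path depends on your package layout). A Mutation with
# isDelete=True deletes the cell instead of writing it.
#
#   from hbase.ttypes import Mutation  # path may vary
#   muts = [Mutation(column='cf:a', value='1'),
#           Mutation(column='cf:b', isDelete=True)]
#   client.mutateRow('mytable', 'row1', muts, None)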
def mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
"""
Apply a series of mutations (updates/deletes) to a row in a
single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
self.send_mutateRowTs(tableName, row, mutations, timestamp, attributes)
self.recv_mutateRowTs()
def send_mutateRowTs(self, tableName, row, mutations, timestamp, attributes):
self._oprot.writeMessageBegin('mutateRowTs', TMessageType.CALL, self._seqid)
args = mutateRowTs_args()
args.tableName = tableName
args.row = row
args.mutations = mutations
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRowTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
def mutateRows(self, tableName, rowBatches, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The current timestamp is used by default, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
self.send_mutateRows(tableName, rowBatches, attributes)
self.recv_mutateRows()
def send_mutateRows(self, tableName, rowBatches, attributes):
self._oprot.writeMessageBegin('mutateRows', TMessageType.CALL, self._seqid)
args = mutateRows_args()
args.tableName = tableName
args.rowBatches = rowBatches
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRows(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
def mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
"""
Apply a series of batches (each a series of mutations on a single row)
in a single transaction. If an exception is thrown, then the
transaction is aborted. The specified timestamp is used, and
all entries will have an identical timestamp.
Parameters:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
self.send_mutateRowsTs(tableName, rowBatches, timestamp, attributes)
self.recv_mutateRowsTs()
def send_mutateRowsTs(self, tableName, rowBatches, timestamp, attributes):
self._oprot.writeMessageBegin('mutateRowsTs', TMessageType.CALL, self._seqid)
args = mutateRowsTs_args()
args.tableName = tableName
args.rowBatches = rowBatches
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mutateRowsTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mutateRowsTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
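# Cross-row batches use BatchMutation, one entry per row. A hedged sketch,
# assuming `client` and the ttypes import as in the sketches above:
#
#   from hbase.ttypes import BatchMutation, Mutation  # path may vary
#   batches = [
#       BatchMutation(row='row1', mutations=[Mutation(column='cf:a', value='1')]),
#       BatchMutation(row='row2', mutations=[Mutation(column='cf:a', value='2')]),
#   ]
#   client.mutateRowsTs('mytable', batches, 1400000000000, None)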
def atomicIncrement(self, tableName, row, column, value):
"""
Atomically increment the specified column value. Returns the value after the increment.
Parameters:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
self.send_atomicIncrement(tableName, row, column, value)
return self.recv_atomicIncrement()
def send_atomicIncrement(self, tableName, row, column, value):
self._oprot.writeMessageBegin('atomicIncrement', TMessageType.CALL, self._seqid)
args = atomicIncrement_args()
args.tableName = tableName
args.row = row
args.column = column
args.value = value
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_atomicIncrement(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = atomicIncrement_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "atomicIncrement failed: unknown result");
def deleteAll(self, tableName, row, column, attributes):
"""
Delete all cells that match the passed row and column.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
self.send_deleteAll(tableName, row, column, attributes)
self.recv_deleteAll()
def send_deleteAll(self, tableName, row, column, attributes):
self._oprot.writeMessageBegin('deleteAll', TMessageType.CALL, self._seqid)
args = deleteAll_args()
args.tableName = tableName
args.row = row
args.column = column
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAll(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAll_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteAllTs(self, tableName, row, column, timestamp, attributes):
"""
Delete all cells that match the passed row and column and whose
timestamp is equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- timestamp: timestamp
- attributes: Delete attributes
"""
self.send_deleteAllTs(tableName, row, column, timestamp, attributes)
self.recv_deleteAllTs()
def send_deleteAllTs(self, tableName, row, column, timestamp, attributes):
self._oprot.writeMessageBegin('deleteAllTs', TMessageType.CALL, self._seqid)
args = deleteAllTs_args()
args.tableName = tableName
args.row = row
args.column = column
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
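# A delete sketch, assuming `client` as above: deleteAll removes every
# version of the cell, while deleteAllTs removes only versions at or older
# than the given timestamp.
#
#   client.deleteAll('mytable', 'row1', 'cf:qual', None)
#   client.deleteAllTs('mytable', 'row1', 'cf:qual', 1400000000000, None)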
def deleteAllRow(self, tableName, row, attributes):
"""
Completely delete the row's cells.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
self.send_deleteAllRow(tableName, row, attributes)
self.recv_deleteAllRow()
def send_deleteAllRow(self, tableName, row, attributes):
self._oprot.writeMessageBegin('deleteAllRow', TMessageType.CALL, self._seqid)
args = deleteAllRow_args()
args.tableName = tableName
args.row = row
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllRow(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllRow_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def increment(self, increment):
"""
Increment a cell by the given amount.
Increments can be applied asynchronously if hbase.regionserver.thrift.coalesceIncrement
is set to true (false is the default). Set it to true if you need the extra performance
and can accept some data loss if a Thrift server dies with increments still in the queue.
Parameters:
- increment: The single increment to apply
"""
self.send_increment(increment)
self.recv_increment()
def send_increment(self, increment):
self._oprot.writeMessageBegin('increment', TMessageType.CALL, self._seqid)
args = increment_args()
args.increment = increment
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_increment(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = increment_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
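# A sketch using the TIncrement struct, assuming `client` as above. Unlike
# atomicIncrement, this call returns nothing, and the server may coalesce it
# when hbase.regionserver.thrift.coalesceIncrement is enabled.
#
#   from hbase.ttypes import TIncrement  # path may vary
#   client.increment(TIncrement('mytable', 'row1', 'cf:counter', 1))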
def incrementRows(self, increments):
"""
Parameters:
- increments: The list of increments
"""
self.send_incrementRows(increments)
self.recv_incrementRows()
def send_incrementRows(self, increments):
self._oprot.writeMessageBegin('incrementRows', TMessageType.CALL, self._seqid)
args = incrementRows_args()
args.increments = increments
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_incrementRows(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = incrementRows_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def deleteAllRowTs(self, tableName, row, timestamp, attributes):
"""
Completely delete the row's cells marked with a timestamp
equal-to or older than the passed timestamp.
Parameters:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: timestamp
- attributes: Delete attributes
"""
self.send_deleteAllRowTs(tableName, row, timestamp, attributes)
self.recv_deleteAllRowTs()
def send_deleteAllRowTs(self, tableName, row, timestamp, attributes):
self._oprot.writeMessageBegin('deleteAllRowTs', TMessageType.CALL, self._seqid)
args = deleteAllRowTs_args()
args.tableName = tableName
args.row = row
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteAllRowTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteAllRowTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
return
def scannerOpenWithScan(self, tableName, scan, attributes):
"""
Get a scanner on the current table, using the Scan instance
for the scan parameters.
Parameters:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
self.send_scannerOpenWithScan(tableName, scan, attributes)
return self.recv_scannerOpenWithScan()
def send_scannerOpenWithScan(self, tableName, scan, attributes):
self._oprot.writeMessageBegin('scannerOpenWithScan', TMessageType.CALL, self._seqid)
args = scannerOpenWithScan_args()
args.tableName = tableName
args.scan = scan
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithScan(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithScan_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithScan failed: unknown result");
def scannerOpen(self, tableName, startRow, columns, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
self.send_scannerOpen(tableName, startRow, columns, attributes)
return self.recv_scannerOpen()
def send_scannerOpen(self, tableName, startRow, columns, attributes):
self._oprot.writeMessageBegin('scannerOpen', TMessageType.CALL, self._seqid)
args = scannerOpen_args()
args.tableName = tableName
args.startRow = startRow
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpen(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpen_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpen failed: unknown result");
def scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
"""
Get a scanner on the current table starting and stopping at the
specified rows. Return the specified columns.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
self.send_scannerOpenWithStop(tableName, startRow, stopRow, columns, attributes)
return self.recv_scannerOpenWithStop()
def send_scannerOpenWithStop(self, tableName, startRow, stopRow, columns, attributes):
self._oprot.writeMessageBegin('scannerOpenWithStop', TMessageType.CALL, self._seqid)
args = scannerOpenWithStop_args()
args.tableName = tableName
args.startRow = startRow
args.stopRow = stopRow
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithStop(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithStop_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithStop failed: unknown result");
def scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
"""
Open a scanner for a given prefix. That is, all returned rows will have
the specified prefix; no other rows will be returned.
@return scanner id to use with other scanner calls
Parameters:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
self.send_scannerOpenWithPrefix(tableName, startAndPrefix, columns, attributes)
return self.recv_scannerOpenWithPrefix()
def send_scannerOpenWithPrefix(self, tableName, startAndPrefix, columns, attributes):
self._oprot.writeMessageBegin('scannerOpenWithPrefix', TMessageType.CALL, self._seqid)
args = scannerOpenWithPrefix_args()
args.tableName = tableName
args.startAndPrefix = startAndPrefix
args.columns = columns
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithPrefix(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithPrefix_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithPrefix failed: unknown result");
def scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting at the specified row and
ending at the last row in the table. Return the specified columns.
Only values with the specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
self.send_scannerOpenTs(tableName, startRow, columns, timestamp, attributes)
return self.recv_scannerOpenTs()
def send_scannerOpenTs(self, tableName, startRow, columns, timestamp, attributes):
self._oprot.writeMessageBegin('scannerOpenTs', TMessageType.CALL, self._seqid)
args = scannerOpenTs_args()
args.tableName = tableName
args.startRow = startRow
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenTs failed: unknown result");
def scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
"""
Get a scanner on the current table starting and stopping at the
specified rows. Return the specified columns. Only values with the
specified timestamp are returned.
@return scanner id to be used with other scanner procedures
Parameters:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
self.send_scannerOpenWithStopTs(tableName, startRow, stopRow, columns, timestamp, attributes)
return self.recv_scannerOpenWithStopTs()
def send_scannerOpenWithStopTs(self, tableName, startRow, stopRow, columns, timestamp, attributes):
self._oprot.writeMessageBegin('scannerOpenWithStopTs', TMessageType.CALL, self._seqid)
args = scannerOpenWithStopTs_args()
args.tableName = tableName
args.startRow = startRow
args.stopRow = stopRow
args.columns = columns
args.timestamp = timestamp
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerOpenWithStopTs(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerOpenWithStopTs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerOpenWithStopTs failed: unknown result");
def scannerGet(self, id):
"""
Returns the scanner's current row value and advances to the next
row in the table. When there are no more rows in the table, or a key
greater-than-or-equal-to the scanner's specified stopRow is reached,
an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
"""
self.send_scannerGet(id)
return self.recv_scannerGet()
def send_scannerGet(self, id):
self._oprot.writeMessageBegin('scannerGet', TMessageType.CALL, self._seqid)
args = scannerGet_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerGet(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerGet_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerGet failed: unknown result");
def scannerGetList(self, id, nbRows):
"""
Returns, starting at the scanner's current row, up to nbRows worth of
rows, and advances to the next row in the table. When there are no more
rows in the table, or a key greater-than-or-equal-to the scanner's
specified stopRow is reached, an empty list is returned.
@return a TRowResult containing the current row and a map of the columns to TCells.
@throws IllegalArgument if ScannerID is invalid
@throws NotFound when the scanner reaches the end
Parameters:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
self.send_scannerGetList(id, nbRows)
return self.recv_scannerGetList()
def send_scannerGetList(self, id, nbRows):
self._oprot.writeMessageBegin('scannerGetList', TMessageType.CALL, self._seqid)
args = scannerGetList_args()
args.id = id
args.nbRows = nbRows
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerGetList(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerGetList_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "scannerGetList failed: unknown result");
def scannerClose(self, id):
"""
Closes the server-state associated with an open scanner.
@throws IllegalArgument if ScannerID is invalid
Parameters:
- id: id of a scanner returned by scannerOpen
"""
self.send_scannerClose(id)
self.recv_scannerClose()
def send_scannerClose(self, id):
self._oprot.writeMessageBegin('scannerClose', TMessageType.CALL, self._seqid)
args = scannerClose_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_scannerClose(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = scannerClose_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
return
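# A typical scan ties the scanner calls together: open, fetch in batches,
# then close. A hedged sketch, assuming `client` as above:
#
#   scanner_id = client.scannerOpen('mytable', '', ['cf'], None)
#   try:
#       while True:
#           rows = client.scannerGetList(scanner_id, 100)
#           if not rows:
#               break
#           for r in rows:
#               print(r.row, r.columns)
#   finally:
#       client.scannerClose(scanner_id)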
def getRowOrBefore(self, tableName, row, family):
"""
Get the row just before the specified one.
@return value for specified row/column
Parameters:
- tableName: name of table
- row: row key
- family: column name
"""
self.send_getRowOrBefore(tableName, row, family)
return self.recv_getRowOrBefore()
def send_getRowOrBefore(self, tableName, row, family):
self._oprot.writeMessageBegin('getRowOrBefore', TMessageType.CALL, self._seqid)
args = getRowOrBefore_args()
args.tableName = tableName
args.row = row
args.family = family
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRowOrBefore(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRowOrBefore_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRowOrBefore failed: unknown result");
def getRegionInfo(self, row):
"""
Get the region info for the specified row. It scans
the meta table to find the region's start and end keys.
@return TRegionInfo for the region containing the specified row
Parameters:
- row: row key
"""
self.send_getRegionInfo(row)
return self.recv_getRegionInfo()
def send_getRegionInfo(self, row):
self._oprot.writeMessageBegin('getRegionInfo', TMessageType.CALL, self._seqid)
args = getRegionInfo_args()
args.row = row
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRegionInfo(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getRegionInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRegionInfo failed: unknown result");
def append(self, append):
"""
Appends values to one or more columns within a single row.
@return values of columns after the append operation.
Parameters:
- append: The single append operation to apply
"""
self.send_append(append)
return self.recv_append()
def send_append(self, append):
self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
args = append_args()
args.append = append
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = append_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result");
def checkAndPut(self, tableName, row, column, value, mput, attributes):
"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it applies the corresponding put mutation.
@return true if the new put was executed, false otherwise
Parameters:
- tableName: name of table
- row: row key
- column: column name
- value: the expected value for the column parameter; if not
provided, the check is for the non-existence of the
column in question
- mput: mutation for the put
- attributes: Mutation attributes
"""
self.send_checkAndPut(tableName, row, column, value, mput, attributes)
return self.recv_checkAndPut()
def send_checkAndPut(self, tableName, row, column, value, mput, attributes):
self._oprot.writeMessageBegin('checkAndPut', TMessageType.CALL, self._seqid)
args = checkAndPut_args()
args.tableName = tableName
args.row = row
args.column = column
args.value = value
args.mput = mput
args.attributes = attributes
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_checkAndPut(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = checkAndPut_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.io is not None:
raise result.io
if result.ia is not None:
raise result.ia
raise TApplicationException(TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["enableTable"] = Processor.process_enableTable
self._processMap["disableTable"] = Processor.process_disableTable
self._processMap["isTableEnabled"] = Processor.process_isTableEnabled
self._processMap["compact"] = Processor.process_compact
self._processMap["majorCompact"] = Processor.process_majorCompact
self._processMap["getTableNames"] = Processor.process_getTableNames
self._processMap["getColumnDescriptors"] = Processor.process_getColumnDescriptors
self._processMap["getTableRegions"] = Processor.process_getTableRegions
self._processMap["createTable"] = Processor.process_createTable
self._processMap["deleteTable"] = Processor.process_deleteTable
self._processMap["get"] = Processor.process_get
self._processMap["getVer"] = Processor.process_getVer
self._processMap["getVerTs"] = Processor.process_getVerTs
self._processMap["getRow"] = Processor.process_getRow
self._processMap["getRowWithColumns"] = Processor.process_getRowWithColumns
self._processMap["getRowTs"] = Processor.process_getRowTs
self._processMap["getRowWithColumnsTs"] = Processor.process_getRowWithColumnsTs
self._processMap["getRows"] = Processor.process_getRows
self._processMap["getRowsWithColumns"] = Processor.process_getRowsWithColumns
self._processMap["getRowsTs"] = Processor.process_getRowsTs
self._processMap["getRowsWithColumnsTs"] = Processor.process_getRowsWithColumnsTs
self._processMap["mutateRow"] = Processor.process_mutateRow
self._processMap["mutateRowTs"] = Processor.process_mutateRowTs
self._processMap["mutateRows"] = Processor.process_mutateRows
self._processMap["mutateRowsTs"] = Processor.process_mutateRowsTs
self._processMap["atomicIncrement"] = Processor.process_atomicIncrement
self._processMap["deleteAll"] = Processor.process_deleteAll
self._processMap["deleteAllTs"] = Processor.process_deleteAllTs
self._processMap["deleteAllRow"] = Processor.process_deleteAllRow
self._processMap["increment"] = Processor.process_increment
self._processMap["incrementRows"] = Processor.process_incrementRows
self._processMap["deleteAllRowTs"] = Processor.process_deleteAllRowTs
self._processMap["scannerOpenWithScan"] = Processor.process_scannerOpenWithScan
self._processMap["scannerOpen"] = Processor.process_scannerOpen
self._processMap["scannerOpenWithStop"] = Processor.process_scannerOpenWithStop
self._processMap["scannerOpenWithPrefix"] = Processor.process_scannerOpenWithPrefix
self._processMap["scannerOpenTs"] = Processor.process_scannerOpenTs
self._processMap["scannerOpenWithStopTs"] = Processor.process_scannerOpenWithStopTs
self._processMap["scannerGet"] = Processor.process_scannerGet
self._processMap["scannerGetList"] = Processor.process_scannerGetList
self._processMap["scannerClose"] = Processor.process_scannerClose
self._processMap["getRowOrBefore"] = Processor.process_getRowOrBefore
self._processMap["getRegionInfo"] = Processor.process_getRegionInfo
self._processMap["append"] = Processor.process_append
self._processMap["checkAndPut"] = Processor.process_checkAndPut
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
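# Server-side wiring sketch: Processor dispatches each incoming call to a
# handler implementing Iface. The server classes below are the stock
# thrift-python modules; the handler class name is a placeholder.
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#   from thrift.server import TServer
#
#   processor = Processor(MyHandler())  # MyHandler implements Iface
#   server = TServer.TSimpleServer(
#       processor,
#       TSocket.TServerSocket(port=9090),
#       TTransport.TBufferedTransportFactory(),
#       TBinaryProtocol.TBinaryProtocolFactory())
#   server.serve()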
def process_enableTable(self, seqid, iprot, oprot):
args = enableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = enableTable_result()
try:
self._handler.enableTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("enableTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_disableTable(self, seqid, iprot, oprot):
args = disableTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = disableTable_result()
try:
self._handler.disableTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("disableTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isTableEnabled(self, seqid, iprot, oprot):
args = isTableEnabled_args()
args.read(iprot)
iprot.readMessageEnd()
result = isTableEnabled_result()
try:
result.success = self._handler.isTableEnabled(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("isTableEnabled", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_compact(self, seqid, iprot, oprot):
args = compact_args()
args.read(iprot)
iprot.readMessageEnd()
result = compact_result()
try:
self._handler.compact(args.tableNameOrRegionName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("compact", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_majorCompact(self, seqid, iprot, oprot):
args = majorCompact_args()
args.read(iprot)
iprot.readMessageEnd()
result = majorCompact_result()
try:
self._handler.majorCompact(args.tableNameOrRegionName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("majorCompact", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableNames(self, seqid, iprot, oprot):
args = getTableNames_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableNames_result()
try:
result.success = self._handler.getTableNames()
except IOError as io:
result.io = io
oprot.writeMessageBegin("getTableNames", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getColumnDescriptors(self, seqid, iprot, oprot):
args = getColumnDescriptors_args()
args.read(iprot)
iprot.readMessageEnd()
result = getColumnDescriptors_result()
try:
result.success = self._handler.getColumnDescriptors(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getColumnDescriptors", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTableRegions(self, seqid, iprot, oprot):
args = getTableRegions_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTableRegions_result()
try:
result.success = self._handler.getTableRegions(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getTableRegions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createTable(self, seqid, iprot, oprot):
args = createTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = createTable_result()
try:
self._handler.createTable(args.tableName, args.columnFamilies)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
except AlreadyExists as exist:
result.exist = exist
oprot.writeMessageBegin("createTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteTable(self, seqid, iprot, oprot):
args = deleteTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteTable_result()
try:
self._handler.deleteTable(args.tableName)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
args = get_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_result()
try:
result.success = self._handler.get(args.tableName, args.row, args.column, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("get", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVer(self, seqid, iprot, oprot):
args = getVer_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVer_result()
try:
result.success = self._handler.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getVer", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getVerTs(self, seqid, iprot, oprot):
args = getVerTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getVerTs_result()
try:
result.success = self._handler.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getVerTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRow(self, seqid, iprot, oprot):
args = getRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRow_result()
try:
result.success = self._handler.getRow(args.tableName, args.row, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowWithColumns(self, seqid, iprot, oprot):
args = getRowWithColumns_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowWithColumns_result()
try:
result.success = self._handler.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowWithColumns", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowTs(self, seqid, iprot, oprot):
args = getRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowTs_result()
try:
result.success = self._handler.getRowTs(args.tableName, args.row, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowWithColumnsTs(self, seqid, iprot, oprot):
args = getRowWithColumnsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowWithColumnsTs_result()
try:
result.success = self._handler.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowWithColumnsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRows(self, seqid, iprot, oprot):
args = getRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRows_result()
try:
result.success = self._handler.getRows(args.tableName, args.rows, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsWithColumns(self, seqid, iprot, oprot):
args = getRowsWithColumns_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsWithColumns_result()
try:
result.success = self._handler.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsWithColumns", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsTs(self, seqid, iprot, oprot):
args = getRowsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsTs_result()
try:
result.success = self._handler.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowsWithColumnsTs(self, seqid, iprot, oprot):
args = getRowsWithColumnsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowsWithColumnsTs_result()
try:
result.success = self._handler.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowsWithColumnsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRow(self, seqid, iprot, oprot):
args = mutateRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRow_result()
try:
self._handler.mutateRow(args.tableName, args.row, args.mutations, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRowTs(self, seqid, iprot, oprot):
args = mutateRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRowTs_result()
try:
self._handler.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRows(self, seqid, iprot, oprot):
args = mutateRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRows_result()
try:
self._handler.mutateRows(args.tableName, args.rowBatches, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mutateRowsTs(self, seqid, iprot, oprot):
args = mutateRowsTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mutateRowsTs_result()
try:
self._handler.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("mutateRowsTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_atomicIncrement(self, seqid, iprot, oprot):
args = atomicIncrement_args()
args.read(iprot)
iprot.readMessageEnd()
result = atomicIncrement_result()
try:
result.success = self._handler.atomicIncrement(args.tableName, args.row, args.column, args.value)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("atomicIncrement", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAll(self, seqid, iprot, oprot):
args = deleteAll_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAll_result()
try:
self._handler.deleteAll(args.tableName, args.row, args.column, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAll", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllTs(self, seqid, iprot, oprot):
args = deleteAllTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllTs_result()
try:
self._handler.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllRow(self, seqid, iprot, oprot):
args = deleteAllRow_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllRow_result()
try:
self._handler.deleteAllRow(args.tableName, args.row, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllRow", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_increment(self, seqid, iprot, oprot):
args = increment_args()
args.read(iprot)
iprot.readMessageEnd()
result = increment_result()
try:
self._handler.increment(args.increment)
except IOError as io:
result.io = io
oprot.writeMessageBegin("increment", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_incrementRows(self, seqid, iprot, oprot):
args = incrementRows_args()
args.read(iprot)
iprot.readMessageEnd()
result = incrementRows_result()
try:
self._handler.incrementRows(args.increments)
except IOError as io:
result.io = io
oprot.writeMessageBegin("incrementRows", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteAllRowTs(self, seqid, iprot, oprot):
args = deleteAllRowTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteAllRowTs_result()
try:
self._handler.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("deleteAllRowTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithScan(self, seqid, iprot, oprot):
args = scannerOpenWithScan_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithScan_result()
try:
result.success = self._handler.scannerOpenWithScan(args.tableName, args.scan, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithScan", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpen(self, seqid, iprot, oprot):
args = scannerOpen_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpen_result()
try:
result.success = self._handler.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpen", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithStop(self, seqid, iprot, oprot):
args = scannerOpenWithStop_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithStop_result()
try:
result.success = self._handler.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithStop", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithPrefix(self, seqid, iprot, oprot):
args = scannerOpenWithPrefix_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithPrefix_result()
try:
result.success = self._handler.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithPrefix", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenTs(self, seqid, iprot, oprot):
args = scannerOpenTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenTs_result()
try:
result.success = self._handler.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerOpenWithStopTs(self, seqid, iprot, oprot):
args = scannerOpenWithStopTs_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerOpenWithStopTs_result()
try:
result.success = self._handler.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes)
except IOError as io:
result.io = io
oprot.writeMessageBegin("scannerOpenWithStopTs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerGet(self, seqid, iprot, oprot):
args = scannerGet_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerGet_result()
try:
result.success = self._handler.scannerGet(args.id)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerGet", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerGetList(self, seqid, iprot, oprot):
args = scannerGetList_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerGetList_result()
try:
result.success = self._handler.scannerGetList(args.id, args.nbRows)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerGetList", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_scannerClose(self, seqid, iprot, oprot):
args = scannerClose_args()
args.read(iprot)
iprot.readMessageEnd()
result = scannerClose_result()
try:
self._handler.scannerClose(args.id)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("scannerClose", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRowOrBefore(self, seqid, iprot, oprot):
args = getRowOrBefore_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRowOrBefore_result()
try:
result.success = self._handler.getRowOrBefore(args.tableName, args.row, args.family)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRowOrBefore", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getRegionInfo(self, seqid, iprot, oprot):
args = getRegionInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getRegionInfo_result()
try:
result.success = self._handler.getRegionInfo(args.row)
except IOError as io:
result.io = io
oprot.writeMessageBegin("getRegionInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
args = append_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_result()
try:
result.success = self._handler.append(args.append)
except IOError as io:
result.io = io
oprot.writeMessageBegin("append", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_checkAndPut(self, seqid, iprot, oprot):
args = checkAndPut_args()
args.read(iprot)
iprot.readMessageEnd()
result = checkAndPut_result()
try:
result.success = self._handler.checkAndPut(args.tableName, args.row, args.column, args.value, args.mput, args.attributes)
except IOError as io:
result.io = io
except IllegalArgument as ia:
result.ia = ia
oprot.writeMessageBegin("checkAndPut", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
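# The process_* methods above are the server-side dispatch targets: the
# processor reads a message name off the wire, decodes the matching *_args
# struct, invokes the handler, and writes back a *_result struct. Below is a
# minimal sketch of serving a handler through this generated processor. It
# assumes the stock Thrift Python runtime and that this file's service
# processor class is named Processor, as in standard thrift output; the
# port number and handler argument are illustrative placeholders.
def _example_serve(handler, port=9090):
  from thrift.transport import TSocket
  from thrift.server import TServer
  processor = Processor(handler)  # dispatches RPCs to the process_* methods above
  transport = TSocket.TServerSocket(port=port)
  tfactory = TTransport.TBufferedTransportFactory()
  pfactory = TBinaryProtocol.TBinaryProtocolFactory()
  TServer.TThreadedServer(processor, transport, tfactory, pfactory).serve()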
# HELPER FUNCTIONS AND STRUCTURES
class enableTable_args(object):
"""
Attributes:
- tableName: name of the table
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enableTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class enableTable_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('enableTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
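# A minimal sketch of how every *_args/*_result struct in this file
# round-trips through a protocol: write() serializes the set fields (or
# defers to the C fastbinary extension when available) and read() reverses
# it. Only names defined in this file and the standard Thrift runtime are
# used; the table name is an illustrative placeholder.
def _example_roundtrip():
  buf = TTransport.TMemoryBuffer()
  enableTable_args(tableName='demo_table').write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = enableTable_args()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  return decoded  # __eq__ compares __dict__, so decoded equals the original args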
class disableTable_args(object):
"""
Attributes:
- tableName: name of the table
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disableTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class disableTable_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disableTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isTableEnabled_args(object):
"""
Attributes:
- tableName: name of the table to check
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isTableEnabled_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isTableEnabled_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
          self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('isTableEnabled_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class compact_args(object):
"""
Attributes:
- tableNameOrRegionName
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableNameOrRegionName', None, None, ), # 1
)
def __init__(self, tableNameOrRegionName=None,):
self.tableNameOrRegionName = tableNameOrRegionName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableNameOrRegionName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('compact_args')
if self.tableNameOrRegionName is not None:
oprot.writeFieldBegin('tableNameOrRegionName', TType.STRING, 1)
oprot.writeString(self.tableNameOrRegionName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class compact_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('compact_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class majorCompact_args(object):
"""
Attributes:
- tableNameOrRegionName
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableNameOrRegionName', None, None, ), # 1
)
def __init__(self, tableNameOrRegionName=None,):
self.tableNameOrRegionName = tableNameOrRegionName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableNameOrRegionName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('majorCompact_args')
if self.tableNameOrRegionName is not None:
oprot.writeFieldBegin('tableNameOrRegionName', TType.STRING, 1)
oprot.writeString(self.tableNameOrRegionName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class majorCompact_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('majorCompact_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableNames_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableNames_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableNames_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype47, _size44) = iprot.readListBegin()
          for _i48 in range(_size44):
            _elem49 = iprot.readString()
self.success.append(_elem49)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableNames_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter50 in self.success:
oprot.writeString(iter50)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
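# The *_result structs all follow one convention: field 0 ('success') holds
# the return value and the higher-numbered fields hold declared exceptions,
# at most one of which is set in a given response. The generated client's
# recv_* methods unpack them exactly as this illustrative helper does.
def _example_unpack_table_names(result):
  if result.io is not None:
    raise result.io  # propagate the server-side IOError to the caller
  return result.success  # list of table-name strings from field 0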
class getColumnDescriptors_args(object):
"""
Attributes:
- tableName: table name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getColumnDescriptors_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getColumnDescriptors_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING,None,TType.STRUCT,(ColumnDescriptor, ColumnDescriptor.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
          (_ktype52, _vtype53, _size51) = iprot.readMapBegin()
          for _i55 in range(_size51):
            _key56 = iprot.readString()
_val57 = ColumnDescriptor()
_val57.read(iprot)
self.success[_key56] = _val57
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getColumnDescriptors_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
for kiter58,viter59 in self.success.items():
oprot.writeString(kiter58)
viter59.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableRegions_args(object):
"""
Attributes:
- tableName: table name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableRegions_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTableRegions_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRegionInfo, TRegionInfo.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype63, _size60) = iprot.readListBegin()
          for _i64 in range(_size60):
_elem65 = TRegionInfo()
_elem65.read(iprot)
self.success.append(_elem65)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTableRegions_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter66 in self.success:
iter66.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createTable_args(object):
"""
Attributes:
- tableName: name of table to create
- columnFamilies: list of column family descriptors
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'columnFamilies', (TType.STRUCT,(ColumnDescriptor, ColumnDescriptor.thrift_spec)), None, ), # 2
)
def __init__(self, tableName=None, columnFamilies=None,):
self.tableName = tableName
self.columnFamilies = columnFamilies
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.columnFamilies = []
(_etype70, _size67) = iprot.readListBegin()
          for _i71 in range(_size67):
_elem72 = ColumnDescriptor()
_elem72.read(iprot)
self.columnFamilies.append(_elem72)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.columnFamilies is not None:
oprot.writeFieldBegin('columnFamilies', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.columnFamilies))
for iter73 in self.columnFamilies:
iter73.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createTable_result(object):
"""
Attributes:
- io
- ia
- exist
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'exist', (AlreadyExists, AlreadyExists.thrift_spec), None, ), # 3
)
def __init__(self, io=None, ia=None, exist=None,):
self.io = io
self.ia = ia
self.exist = exist
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.exist = AlreadyExists()
self.exist.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
if self.exist is not None:
oprot.writeFieldBegin('exist', TType.STRUCT, 3)
self.exist.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
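# A minimal sketch of populating createTable_args. The table and column
# family names are illustrative placeholders, and ColumnDescriptor is
# assumed to accept its fields as keyword arguments, as thrift-generated
# structs do; consult its definition earlier in this file for the full
# field list.
def _example_create_table_args():
  families = [ColumnDescriptor(name='f1:'), ColumnDescriptor(name='f2:')]
  return createTable_args(tableName='demo_table', columnFamilies=families)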
class deleteTable_args(object):
"""
Attributes:
- tableName: name of table to delete
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
)
def __init__(self, tableName=None,):
self.tableName = tableName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteTable_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteTable_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteTable_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
          self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
          self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
          (_ktype75, _vtype76, _size74) = iprot.readMapBegin()
          for _i78 in range(_size74):
            _key79 = iprot.readString()
            _val80 = iprot.readString()
self.attributes[_key79] = _val80
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter81,viter82 in self.attributes.items():
oprot.writeString(kiter81)
oprot.writeString(viter82)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype86, _size83) = iprot.readListBegin()
          for _i87 in range(_size83):
_elem88 = TCell()
_elem88.read(iprot)
self.success.append(_elem88)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter89 in self.success:
iter89.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
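# A sketch of consuming a get_result: 'success' holds a list of TCell
# structs (at most one cell for a plain get). The 'value' attribute is
# assumed from the generated TCell definition; this helper is illustrative
# only.
def _example_latest_value(result):
  if result.io is not None:
    raise result.io
  return result.success[0].value if result.success else None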
class getVer_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I32, 'numVersions', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, column=None, numVersions=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.numVersions = numVersions
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
          self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
          self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
          self.numVersions = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
          (_ktype91, _vtype92, _size90) = iprot.readMapBegin()
          for _i94 in range(_size90):
            _key95 = iprot.readString()
            _val96 = iprot.readString()
self.attributes[_key95] = _val96
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVer_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.numVersions is not None:
oprot.writeFieldBegin('numVersions', TType.I32, 4)
oprot.writeI32(self.numVersions)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter97,viter98 in self.attributes.items():
oprot.writeString(kiter97)
oprot.writeString(viter98)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVer_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype102, _size99) = iprot.readListBegin()
          for _i103 in range(_size99):
_elem104 = TCell()
_elem104.read(iprot)
self.success.append(_elem104)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVer_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter105 in self.success:
iter105.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVerTs_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- timestamp: timestamp
- numVersions: number of versions to retrieve
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.I32, 'numVersions', None, None, ), # 5
(6, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 6
)
def __init__(self, tableName=None, row=None, column=None, timestamp=None, numVersions=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.timestamp = timestamp
self.numVersions = numVersions
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
          self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
          self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
          self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
          self.numVersions = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.attributes = {}
          (_ktype107, _vtype108, _size106) = iprot.readMapBegin()
          for _i110 in range(_size106):
            _key111 = iprot.readString()
            _val112 = iprot.readString()
self.attributes[_key111] = _val112
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVerTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.numVersions is not None:
oprot.writeFieldBegin('numVersions', TType.I32, 5)
oprot.writeI32(self.numVersions)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter113,viter114 in self.attributes.items():
oprot.writeString(kiter113)
oprot.writeString(viter114)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getVerTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype118, _size115) = iprot.readListBegin()
          for _i119 in range(_size115):
_elem120 = TCell()
_elem120.read(iprot)
self.success.append(_elem120)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getVerTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter121 in self.success:
iter121.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRow_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, row=None, attributes=None,):
self.tableName = tableName
self.row = row
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
          self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
          self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
          (_ktype123, _vtype124, _size122) = iprot.readMapBegin()
          for _i126 in range(_size122):
            _key127 = iprot.readString()
            _val128 = iprot.readString()
self.attributes[_key127] = _val128
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter129,viter130 in self.attributes.items():
oprot.writeString(kiter129)
oprot.writeString(viter130)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRow_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype134, _size131) = iprot.readListBegin()
for _i135 in xrange(_size131):
_elem136 = TRowResult()
_elem136.read(iprot)
self.success.append(_elem136)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRow_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter137 in self.success:
iter137.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumns_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, columns=None, attributes=None,):
self.tableName = tableName
self.row = row
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype141, _size138) = iprot.readListBegin()
for _i142 in xrange(_size138):
_elem143 = iprot.readString();
self.columns.append(_elem143)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype145, _vtype146, _size144 ) = iprot.readMapBegin()
for _i148 in xrange(_size144):
_key149 = iprot.readString();
_val150 = iprot.readString();
self.attributes[_key149] = _val150
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumns_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter151 in self.columns:
oprot.writeString(iter151)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter152,viter153 in self.attributes.items():
oprot.writeString(kiter152)
oprot.writeString(viter153)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumns_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype157, _size154) = iprot.readListBegin()
for _i158 in xrange(_size154):
_elem159 = TRowResult()
_elem159.read(iprot)
self.success.append(_elem159)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumns_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter160 in self.success:
iter160.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
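# The *Ts variants below mirror the plain getters but add an I64 `timestamp`
# field; per the HBase Thrift IDL they fetch the row's data as of the given
# timestamp rather than the latest version.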
class getRowTs_args(object):
"""
Attributes:
- tableName: name of the table
- row: row key
- timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype162, _vtype163, _size161 ) = iprot.readMapBegin()
for _i165 in xrange(_size161):
_key166 = iprot.readString();
_val167 = iprot.readString();
self.attributes[_key166] = _val167
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter168,viter169 in self.attributes.items():
oprot.writeString(kiter168)
oprot.writeString(viter169)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype173, _size170) = iprot.readListBegin()
for _i174 in xrange(_size170):
_elem175 = TRowResult()
_elem175.read(iprot)
self.success.append(_elem175)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter176 in self.success:
iter176.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumnsTs_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- columns: List of columns to return, null for all columns
   - timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype180, _size177) = iprot.readListBegin()
for _i181 in xrange(_size177):
_elem182 = iprot.readString();
self.columns.append(_elem182)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype184, _vtype185, _size183 ) = iprot.readMapBegin()
for _i187 in xrange(_size183):
_key188 = iprot.readString();
_val189 = iprot.readString();
self.attributes[_key188] = _val189
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumnsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter190 in self.columns:
oprot.writeString(iter190)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter191,viter192 in self.attributes.items():
oprot.writeString(kiter191)
oprot.writeString(viter192)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowWithColumnsTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype196, _size193) = iprot.readListBegin()
for _i197 in xrange(_size193):
_elem198 = TRowResult()
_elem198.read(iprot)
self.success.append(_elem198)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowWithColumnsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter199 in self.success:
iter199.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
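# The getRows* wrappers below take a list of row keys (`rows`, field 2) in
# place of the single `row` key used above; the result shape is unchanged:
# a list of TRowResult.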
class getRows_args(object):
"""
Attributes:
- tableName: name of table
- rows: row keys
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, rows=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype203, _size200) = iprot.readListBegin()
for _i204 in xrange(_size200):
_elem205 = iprot.readString();
self.rows.append(_elem205)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype207, _vtype208, _size206 ) = iprot.readMapBegin()
for _i210 in xrange(_size206):
_key211 = iprot.readString();
_val212 = iprot.readString();
self.attributes[_key211] = _val212
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRows_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter213 in self.rows:
oprot.writeString(iter213)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter214,viter215 in self.attributes.items():
oprot.writeString(kiter214)
oprot.writeString(viter215)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRows_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype219, _size216) = iprot.readListBegin()
for _i220 in xrange(_size216):
_elem221 = TRowResult()
_elem221.read(iprot)
self.success.append(_elem221)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRows_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter222 in self.success:
iter222.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumns_args(object):
"""
Attributes:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rows=None, columns=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype226, _size223) = iprot.readListBegin()
for _i227 in xrange(_size223):
_elem228 = iprot.readString();
self.rows.append(_elem228)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype232, _size229) = iprot.readListBegin()
for _i233 in xrange(_size229):
_elem234 = iprot.readString();
self.columns.append(_elem234)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype236, _vtype237, _size235 ) = iprot.readMapBegin()
for _i239 in xrange(_size235):
_key240 = iprot.readString();
_val241 = iprot.readString();
self.attributes[_key240] = _val241
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumns_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter242 in self.rows:
oprot.writeString(iter242)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter243 in self.columns:
oprot.writeString(iter243)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter244,viter245 in self.attributes.items():
oprot.writeString(kiter244)
oprot.writeString(viter245)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumns_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype249, _size246) = iprot.readListBegin()
for _i250 in xrange(_size246):
_elem251 = TRowResult()
_elem251.read(iprot)
self.success.append(_elem251)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumns_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter252 in self.success:
iter252.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsTs_args(object):
"""
Attributes:
- tableName: name of the table
- rows: row keys
- timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rows=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype256, _size253) = iprot.readListBegin()
for _i257 in xrange(_size253):
_elem258 = iprot.readString();
self.rows.append(_elem258)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype260, _vtype261, _size259 ) = iprot.readMapBegin()
for _i263 in xrange(_size259):
_key264 = iprot.readString();
_val265 = iprot.readString();
self.attributes[_key264] = _val265
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter266 in self.rows:
oprot.writeString(iter266)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter267,viter268 in self.attributes.items():
oprot.writeString(kiter267)
oprot.writeString(viter268)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype272, _size269) = iprot.readListBegin()
for _i273 in xrange(_size269):
_elem274 = TRowResult()
_elem274.read(iprot)
self.success.append(_elem274)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter275 in self.success:
iter275.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumnsTs_args(object):
"""
Attributes:
- tableName: name of table
- rows: row keys
- columns: List of columns to return, null for all columns
   - timestamp: timestamp
- attributes: Get attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rows', (TType.STRING,None), None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, rows=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rows = rows
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rows = []
(_etype279, _size276) = iprot.readListBegin()
for _i280 in xrange(_size276):
_elem281 = iprot.readString();
self.rows.append(_elem281)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype285, _size282) = iprot.readListBegin()
for _i286 in xrange(_size282):
_elem287 = iprot.readString();
self.columns.append(_elem287)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype289, _vtype290, _size288 ) = iprot.readMapBegin()
for _i292 in xrange(_size288):
_key293 = iprot.readString();
_val294 = iprot.readString();
self.attributes[_key293] = _val294
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumnsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rows is not None:
oprot.writeFieldBegin('rows', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.rows))
for iter295 in self.rows:
oprot.writeString(iter295)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter296 in self.columns:
oprot.writeString(iter296)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter297,viter298 in self.attributes.items():
oprot.writeString(kiter297)
oprot.writeString(viter298)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowsWithColumnsTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype302, _size299) = iprot.readListBegin()
for _i303 in xrange(_size299):
_elem304 = TRowResult()
_elem304.read(iprot)
self.success.append(_elem304)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowsWithColumnsTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter305 in self.success:
iter305.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
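# The mutateRow* wrappers below carry write operations.  Each Mutation struct
# (defined earlier in this module) describes a single column put or delete,
# and the result structs can carry either `io` (IOError) or `ia`
# (IllegalArgument), which the Client's recv_ methods re-raise.  A minimal
# sketch, reusing the `client` from the sketch above:
#
#   mutations = [Mutation(column='cf:col', value='v1')]
#   client.mutateRow('t1', 'row1', mutations, None)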
class mutateRow_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, mutations=None, attributes=None,):
self.tableName = tableName
self.row = row
self.mutations = mutations
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.mutations = []
(_etype309, _size306) = iprot.readListBegin()
for _i310 in xrange(_size306):
_elem311 = Mutation()
_elem311.read(iprot)
self.mutations.append(_elem311)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype313, _vtype314, _size312 ) = iprot.readMapBegin()
for _i316 in xrange(_size312):
_key317 = iprot.readString();
_val318 = iprot.readString();
self.attributes[_key317] = _val318
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.mutations is not None:
oprot.writeFieldBegin('mutations', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.mutations))
for iter319 in self.mutations:
iter319.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter320,viter321 in self.attributes.items():
oprot.writeString(kiter320)
oprot.writeString(viter321)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRow_result(object):
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRow_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRowTs_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- mutations: list of mutation commands
- timestamp: timestamp
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, mutations=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.mutations = mutations
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.mutations = []
(_etype325, _size322) = iprot.readListBegin()
for _i326 in xrange(_size322):
_elem327 = Mutation()
_elem327.read(iprot)
self.mutations.append(_elem327)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype329, _vtype330, _size328 ) = iprot.readMapBegin()
for _i332 in xrange(_size328):
_key333 = iprot.readString();
_val334 = iprot.readString();
self.attributes[_key333] = _val334
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.mutations is not None:
oprot.writeFieldBegin('mutations', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.mutations))
for iter335 in self.mutations:
iter335.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter336,viter337 in self.attributes.items():
oprot.writeString(kiter336)
oprot.writeString(viter337)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRowTs_result(object):
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
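# mutateRows* batch the same idea across rows: each BatchMutation (defined
# earlier in this module) pairs one row key with its list of Mutation
# structs.  A minimal sketch, reusing `client` and `mutations` from the
# sketches above:
#
#   batches = [BatchMutation(row='row1', mutations=mutations)]
#   client.mutateRows('t1', batches, None)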
class mutateRows_args(object):
"""
Attributes:
- tableName: name of table
- rowBatches: list of row batches
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rowBatches', (TType.STRUCT,(BatchMutation, BatchMutation.thrift_spec)), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, rowBatches=None, attributes=None,):
self.tableName = tableName
self.rowBatches = rowBatches
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rowBatches = []
(_etype341, _size338) = iprot.readListBegin()
for _i342 in xrange(_size338):
_elem343 = BatchMutation()
_elem343.read(iprot)
self.rowBatches.append(_elem343)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype345, _vtype346, _size344 ) = iprot.readMapBegin()
for _i348 in xrange(_size344):
_key349 = iprot.readString();
_val350 = iprot.readString();
self.attributes[_key349] = _val350
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRows_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rowBatches is not None:
oprot.writeFieldBegin('rowBatches', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.rowBatches))
for iter351 in self.rowBatches:
iter351.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter352,viter353 in self.attributes.items():
oprot.writeString(kiter352)
oprot.writeString(viter353)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRows_result(object):
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRows_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
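
# Editor's sketch of the calling side. `client` stands for an instance of
# the generated Hbase Client (defined elsewhere in this module); Mutation
# and its `column`/`value` fields are assumptions based on the standard
# HBase IDL. A non-None `io`/`ia` in mutateRows_result is surfaced to the
# caller as a raised IOError / IllegalArgument.
def _example_mutate_rows(client):
    batch = BatchMutation(row='row-1', mutations=[
        Mutation(column='cf:qual', value='42'),
    ])
    client.mutateRows('demo-table', [batch], {})
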
class mutateRowsTs_args(object):
"""
Attributes:
- tableName: name of table
- rowBatches: list of row batches
- timestamp: timestamp
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.LIST, 'rowBatches', (TType.STRUCT,(BatchMutation, BatchMutation.thrift_spec)), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, rowBatches=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.rowBatches = rowBatches
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.rowBatches = []
(_etype357, _size354) = iprot.readListBegin()
for _i358 in xrange(_size354):
_elem359 = BatchMutation()
_elem359.read(iprot)
self.rowBatches.append(_elem359)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype361, _vtype362, _size360) = iprot.readMapBegin()
for _i364 in xrange(_size360):
_key365 = iprot.readString()
_val366 = iprot.readString()
self.attributes[_key365] = _val366
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowsTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.rowBatches is not None:
oprot.writeFieldBegin('rowBatches', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.rowBatches))
for iter367 in self.rowBatches:
iter367.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter368, viter369 in self.attributes.items():
oprot.writeString(kiter368)
oprot.writeString(viter369)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mutateRowsTs_result(object):
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mutateRowsTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
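
# Editor's note, as a minimal sketch: mutateRowsTs is mutateRows plus an
# explicit cell timestamp (milliseconds in HBase), letting the caller pin
# the version a replayed batch lands at instead of using server time.
# `client` is again an assumed generated Client instance.
def _example_mutate_rows_ts(client, batches):
    client.mutateRowsTs('demo-table', batches, 1500000000000, {})
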
class atomicIncrement_args(object):
"""
Attributes:
- tableName: name of table
- row: row to increment
- column: name of column
- value: amount to increment by
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'value', None, None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, value=None,):
self.tableName = tableName
self.row = row
self.column = column
self.value = value
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.value = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomicIncrement_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.I64, 4)
oprot.writeI64(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class atomicIncrement_result(object):
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomicIncrement_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
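
# Editor's sketch: atomicIncrement treats the addressed cell as a 64-bit
# counter and returns the post-increment value (the I64 `success` field
# above). `client` is an assumed generated Client instance.
def _example_atomic_increment(client):
    return client.atomicIncrement('demo-table', 'row-1', 'cf:hits', 1)
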
class deleteAll_args(object):
"""
Attributes:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, column=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype371, _vtype372, _size370) = iprot.readMapBegin()
for _i374 in xrange(_size370):
_key375 = iprot.readString()
_val376 = iprot.readString()
self.attributes[_key375] = _val376
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAll_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter377, viter378 in self.attributes.items():
oprot.writeString(kiter377)
oprot.writeString(viter378)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAll_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAll_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
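
# Editor's sketch: deleteAll removes every stored version of a single
# cell; the Ts variant that follows removes only versions at or older
# than the given timestamp. `client` is an assumed generated Client.
def _example_delete_all(client):
    client.deleteAll('demo-table', 'row-1', 'cf:qual', {})
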
class deleteAllTs_args(object):
"""
Attributes:
- tableName: name of table
- row: Row to update
- column: name of column whose value is to be deleted
- timestamp: timestamp
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, row=None, column=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype380, _vtype381, _size379) = iprot.readMapBegin()
for _i383 in xrange(_size379):
_key384 = iprot.readString()
_val385 = iprot.readString()
self.attributes[_key384] = _val385
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter386, viter387 in self.attributes.items():
oprot.writeString(kiter386)
oprot.writeString(viter387)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllTs_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRow_args(object):
"""
Attributes:
- tableName: name of table
- row: key of the row to be completely deleted.
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, row=None, attributes=None,):
self.tableName = tableName
self.row = row
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype389, _vtype390, _size388) = iprot.readMapBegin()
for _i392 in xrange(_size388):
_key393 = iprot.readString()
_val394 = iprot.readString()
self.attributes[_key393] = _val394
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRow_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter395, viter396 in self.attributes.items():
oprot.writeString(kiter395)
oprot.writeString(viter396)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRow_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRow_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
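
# Editor's sketch: deleteAllRow drops the whole row across all column
# families, in contrast to deleteAll above, which targets one cell.
# `client` is an assumed generated Client instance.
def _example_delete_all_row(client):
    client.deleteAllRow('demo-table', 'row-1', {})
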
class increment_args(object):
"""
Attributes:
- increment: The single increment to apply
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'increment', (TIncrement, TIncrement.thrift_spec), None, ), # 1
)
def __init__(self, increment=None,):
self.increment = increment
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.increment = TIncrement()
self.increment.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_args')
if self.increment is not None:
oprot.writeFieldBegin('increment', TType.STRUCT, 1)
self.increment.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class increment_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('increment_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class incrementRows_args(object):
"""
Attributes:
- increments: The list of increments
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'increments', (TType.STRUCT,(TIncrement, TIncrement.thrift_spec)), None, ), # 1
)
def __init__(self, increments=None,):
self.increments = increments
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.increments = []
(_etype400, _size397) = iprot.readListBegin()
for _i401 in xrange(_size397):
_elem402 = TIncrement()
_elem402.read(iprot)
self.increments.append(_elem402)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('incrementRows_args')
if self.increments is not None:
oprot.writeFieldBegin('increments', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.increments))
for iter403 in self.increments:
iter403.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class incrementRows_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('incrementRows_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
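
# Editor's sketch: incrementRows simply batches several TIncrement
# requests into one RPC, amortizing the round trip. `client` is an
# assumed generated Client instance.
def _example_increment_rows(client, increments):
    client.incrementRows(increments)
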
class deleteAllRowTs_args(object):
"""
Attributes:
- tableName: name of table
- row: key of the row to be completely deleted.
- timestamp: timestamp
- attributes: Delete attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, row=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.row = row
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype405, _vtype406, _size404) = iprot.readMapBegin()
for _i408 in xrange(_size404):
_key409 = iprot.readString()
_val410 = iprot.readString()
self.attributes[_key409] = _val410
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRowTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter411, viter412 in self.attributes.items():
oprot.writeString(kiter411)
oprot.writeString(viter412)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteAllRowTs_result(object):
"""
Attributes:
- io
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, io=None,):
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteAllRowTs_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithScan_args(object):
"""
Attributes:
- tableName: name of table
- scan: Scan instance
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRUCT, 'scan', (TScan, TScan.thrift_spec), None, ), # 2
(3, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, tableName=None, scan=None, attributes=None,):
self.tableName = tableName
self.scan = scan
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.scan = TScan()
self.scan.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.attributes = {}
(_ktype414, _vtype415, _size413) = iprot.readMapBegin()
for _i417 in xrange(_size413):
_key418 = iprot.readString()
_val419 = iprot.readString()
self.attributes[_key418] = _val419
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithScan_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.scan is not None:
oprot.writeFieldBegin('scan', TType.STRUCT, 2)
self.scan.write(oprot)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter420, viter421 in self.attributes.items():
oprot.writeString(kiter420)
oprot.writeString(viter421)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithScan_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithScan_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
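
# Editor's sketch: scannerOpenWithScan is the most general scanner entry
# point; every filtering option rides in one TScan struct, and the I32
# `success` above is the scanner id later passed to the scannerGet and
# scannerClose calls. The TScan field names are assumptions based on the
# standard HBase IDL.
def _example_scanner_open_with_scan(client):
    scan = TScan(startRow='a', stopRow='b', columns=['cf:qual'])
    return client.scannerOpenWithScan('demo-table', scan, {})
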
class scannerOpen_args(object):
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, startRow=None, columns=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype425, _size422) = iprot.readListBegin()
for _i426 in xrange(_size422):
_elem427 = iprot.readString()
self.columns.append(_elem427)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype429, _vtype430, _size428) = iprot.readMapBegin()
for _i432 in xrange(_size428):
_key433 = iprot.readString()
_val434 = iprot.readString()
self.attributes[_key433] = _val434
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpen_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter435 in self.columns:
oprot.writeString(iter435)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter436, viter437 in self.attributes.items():
oprot.writeString(kiter436)
oprot.writeString(viter437)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpen_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpen_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
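
# Editor's sketch of a complete scan loop. scannerGetList and
# scannerClose are assumed to exist elsewhere in this generated service
# (they belong to the standard HBase Thrift interface); each batch is a
# list of generated TRowResult structs, and an empty batch means the
# scanner is exhausted.
def _example_scan_all(client):
    scanner_id = client.scannerOpen('demo-table', '', ['cf'], {})
    try:
        rows = client.scannerGetList(scanner_id, 100)
        while rows:
            for row in rows:
                pass  # inspect row.row / row.columns here
            rows = client.scannerGetList(scanner_id, 100)
    finally:
        client.scannerClose(scanner_id)
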
class scannerOpenWithStop_args(object):
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.STRING, 'stopRow', None, None, ), # 3
(4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, startRow=None, stopRow=None, columns=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.stopRow = stopRow
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.stopRow = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.columns = []
(_etype441, _size438) = iprot.readListBegin()
for _i442 in xrange(_size438):
_elem443 = iprot.readString()
self.columns.append(_elem443)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype445, _vtype446, _size444) = iprot.readMapBegin()
for _i448 in xrange(_size444):
_key449 = iprot.readString()
_val450 = iprot.readString()
self.attributes[_key449] = _val450
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStop_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.stopRow is not None:
oprot.writeFieldBegin('stopRow', TType.STRING, 3)
oprot.writeString(self.stopRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter451 in self.columns:
oprot.writeString(iter451)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter452, viter453 in self.attributes.items():
oprot.writeString(kiter452)
oprot.writeString(viter453)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStop_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStop_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
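
# Editor's sketch: per the docstring above, stopRow is *not* included, so
# [startRow, stopRow) is half-open and adjacent keyspace chunks can be
# scanned back to back without returning a boundary row twice. `client`
# is an assumed generated Client instance.
def _example_scan_half_open(client):
    return client.scannerOpenWithStop('demo-table', 'row-100', 'row-200',
                                      ['cf'], {})
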
class scannerOpenWithPrefix_args(object):
"""
Attributes:
- tableName: name of table
- startAndPrefix: the prefix (and thus start row) of the keys you want
- columns: the columns you want returned
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startAndPrefix', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 4
)
def __init__(self, tableName=None, startAndPrefix=None, columns=None, attributes=None,):
self.tableName = tableName
self.startAndPrefix = startAndPrefix
self.columns = columns
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startAndPrefix = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype457, _size454) = iprot.readListBegin()
for _i458 in xrange(_size454):
_elem459 = iprot.readString()
self.columns.append(_elem459)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.attributes = {}
(_ktype461, _vtype462, _size460) = iprot.readMapBegin()
for _i464 in xrange(_size460):
_key465 = iprot.readString()
_val466 = iprot.readString()
self.attributes[_key465] = _val466
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithPrefix_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startAndPrefix is not None:
oprot.writeFieldBegin('startAndPrefix', TType.STRING, 2)
oprot.writeString(self.startAndPrefix)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter467 in self.columns:
oprot.writeString(iter467)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter468, viter469 in self.attributes.items():
oprot.writeString(kiter468)
oprot.writeString(viter469)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithPrefix_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithPrefix_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
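
# Editor's sketch: startAndPrefix doubles as the first row and the
# required key prefix, making this the convenient form of "scan every
# row whose key starts with X". `client` is an assumed generated Client.
def _example_scan_prefix(client):
    return client.scannerOpenWithPrefix('demo-table', 'user:42:', ['cf'], {})
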
class scannerOpenTs_args(object):
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.LIST, 'columns', (TType.STRING,None), None, ), # 3
(4, TType.I64, 'timestamp', None, None, ), # 4
(5, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 5
)
def __init__(self, tableName=None, startRow=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.columns = []
(_etype473, _size470) = iprot.readListBegin()
for _i474 in xrange(_size470):
_elem475 = iprot.readString();
self.columns.append(_elem475)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.attributes = {}
(_ktype477, _vtype478, _size476 ) = iprot.readMapBegin()
for _i480 in xrange(_size476):
_key481 = iprot.readString();
_val482 = iprot.readString();
self.attributes[_key481] = _val482
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter483 in self.columns:
oprot.writeString(iter483)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 4)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter484,viter485 in self.attributes.items():
oprot.writeString(kiter484)
oprot.writeString(viter485)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStopTs_args(object):
"""
Attributes:
- tableName: name of table
- startRow: Starting row in table to scan.
Send "" (empty string) to start at the first row.
- stopRow: row to stop scanning on. This row is *not* included in the
scanner's results
- columns: columns to scan. If column name is a column family, all
columns of the specified column family are returned. It's also possible
to pass a regex in the column qualifier.
- timestamp: timestamp
- attributes: Scan attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'startRow', None, None, ), # 2
(3, TType.STRING, 'stopRow', None, None, ), # 3
(4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
(5, TType.I64, 'timestamp', None, None, ), # 5
(6, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 6
)
def __init__(self, tableName=None, startRow=None, stopRow=None, columns=None, timestamp=None, attributes=None,):
self.tableName = tableName
self.startRow = startRow
self.stopRow = stopRow
self.columns = columns
self.timestamp = timestamp
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.startRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.stopRow = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.columns = []
(_etype489, _size486) = iprot.readListBegin()
for _i490 in xrange(_size486):
_elem491 = iprot.readString();
self.columns.append(_elem491)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.attributes = {}
(_ktype493, _vtype494, _size492 ) = iprot.readMapBegin()
for _i496 in xrange(_size492):
_key497 = iprot.readString();
_val498 = iprot.readString();
self.attributes[_key497] = _val498
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStopTs_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.startRow is not None:
oprot.writeFieldBegin('startRow', TType.STRING, 2)
oprot.writeString(self.startRow)
oprot.writeFieldEnd()
if self.stopRow is not None:
oprot.writeFieldBegin('stopRow', TType.STRING, 3)
oprot.writeString(self.stopRow)
oprot.writeFieldEnd()
if self.columns is not None:
oprot.writeFieldBegin('columns', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.columns))
for iter499 in self.columns:
oprot.writeString(iter499)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 5)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter500,viter501 in self.attributes.items():
oprot.writeString(kiter500)
oprot.writeString(viter501)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerOpenWithStopTs_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerOpenWithStopTs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGet_args(object):
"""
Attributes:
- id: id of a scanner returned by scannerOpen
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGet_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGet_result(object):
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype505, _size502) = iprot.readListBegin()
for _i506 in xrange(_size502):
_elem507 = TRowResult()
_elem507.read(iprot)
self.success.append(_elem507)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGet_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter508 in self.success:
iter508.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGetList_args(object):
"""
Attributes:
- id: id of a scanner returned by scannerOpen
- nbRows: number of results to return
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
(2, TType.I32, 'nbRows', None, None, ), # 2
)
def __init__(self, id=None, nbRows=None,):
self.id = id
self.nbRows = nbRows
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.nbRows = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGetList_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
if self.nbRows is not None:
oprot.writeFieldBegin('nbRows', TType.I32, 2)
oprot.writeI32(self.nbRows)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerGetList_result(object):
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TRowResult, TRowResult.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype512, _size509) = iprot.readListBegin()
for _i513 in xrange(_size509):
_elem514 = TRowResult()
_elem514.read(iprot)
self.success.append(_elem514)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerGetList_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter515 in self.success:
iter515.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerClose_args(object):
"""
Attributes:
- id: id of a scanner returned by scannerOpen
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerClose_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class scannerClose_result(object):
"""
Attributes:
- io
- ia
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, io=None, ia=None,):
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('scannerClose_result')
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowOrBefore_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- family: column name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'family', None, None, ), # 3
)
def __init__(self, tableName=None, row=None, family=None,):
self.tableName = tableName
self.row = row
self.family = family
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.family = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowOrBefore_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.family is not None:
oprot.writeFieldBegin('family', TType.STRING, 3)
oprot.writeString(self.family)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRowOrBefore_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype519, _size516) = iprot.readListBegin()
for _i520 in xrange(_size516):
_elem521 = TCell()
_elem521.read(iprot)
self.success.append(_elem521)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRowOrBefore_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter522 in self.success:
iter522.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionInfo_args(object):
"""
Attributes:
- row: row key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'row', None, None, ), # 1
)
def __init__(self, row=None,):
self.row = row
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionInfo_args')
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 1)
oprot.writeString(self.row)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getRegionInfo_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TRegionInfo, TRegionInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TRegionInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getRegionInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_args(object):
"""
Attributes:
- append: The single append operation to apply
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'append', (TAppend, TAppend.thrift_spec), None, ), # 1
)
def __init__(self, append=None,):
self.append = append
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.append = TAppend()
self.append.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_args')
if self.append is not None:
oprot.writeFieldBegin('append', TType.STRUCT, 1)
self.append.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_result(object):
"""
Attributes:
- success
- io
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
)
def __init__(self, success=None, io=None,):
self.success = success
self.io = io
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype526, _size523) = iprot.readListBegin()
for _i527 in xrange(_size523):
_elem528 = TCell()
_elem528.read(iprot)
self.success.append(_elem528)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter529 in self.success:
iter529.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndPut_args(object):
"""
Attributes:
- tableName: name of table
- row: row key
- column: column name
- value: the expected value for the column parameter, if not
provided the check is for the non-existence of the
column in question
- mput: mutation for the put
- attributes: Mutation attributes
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'tableName', None, None, ), # 1
(2, TType.STRING, 'row', None, None, ), # 2
(3, TType.STRING, 'column', None, None, ), # 3
None, # 4
(5, TType.STRING, 'value', None, None, ), # 5
(6, TType.STRUCT, 'mput', (Mutation, Mutation.thrift_spec), None, ), # 6
(7, TType.MAP, 'attributes', (TType.STRING,None,TType.STRING,None), None, ), # 7
)
def __init__(self, tableName=None, row=None, column=None, value=None, mput=None, attributes=None,):
self.tableName = tableName
self.row = row
self.column = column
self.value = value
self.mput = mput
self.attributes = attributes
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.tableName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.column = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.value = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.mput = Mutation()
self.mput.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.MAP:
self.attributes = {}
(_ktype531, _vtype532, _size530 ) = iprot.readMapBegin()
for _i534 in xrange(_size530):
_key535 = iprot.readString();
_val536 = iprot.readString();
self.attributes[_key535] = _val536
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_args')
if self.tableName is not None:
oprot.writeFieldBegin('tableName', TType.STRING, 1)
oprot.writeString(self.tableName)
oprot.writeFieldEnd()
if self.row is not None:
oprot.writeFieldBegin('row', TType.STRING, 2)
oprot.writeString(self.row)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRING, 3)
oprot.writeString(self.column)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.STRING, 5)
oprot.writeString(self.value)
oprot.writeFieldEnd()
if self.mput is not None:
oprot.writeFieldBegin('mput', TType.STRUCT, 6)
self.mput.write(oprot)
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.MAP, 7)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.attributes))
for kiter537,viter538 in self.attributes.items():
oprot.writeString(kiter537)
oprot.writeString(viter538)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class checkAndPut_result(object):
"""
Attributes:
- success
- io
- ia
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'io', (IOError, IOError.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ia', (IllegalArgument, IllegalArgument.thrift_spec), None, ), # 2
)
def __init__(self, success=None, io=None, ia=None,):
self.success = success
self.io = io
self.ia = ia
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.io = IOError()
self.io.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ia = IllegalArgument()
self.ia.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('checkAndPut_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.io is not None:
oprot.writeFieldBegin('io', TType.STRUCT, 1)
self.io.write(oprot)
oprot.writeFieldEnd()
if self.ia is not None:
oprot.writeFieldBegin('ia', TType.STRUCT, 2)
self.ia.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
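# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code). The *_args and
# *_result structs above are marshalled by the generated Client class defined
# earlier in this module. The host, port, table, prefix, and column names
# below are hypothetical placeholders.
def _example_scanner_loop(host='localhost', port=9090):
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)  # generated in this module
    transport.open()
    # Open a scanner over all rows starting with the given prefix
    scanner_id = client.scannerOpenWithPrefix('mytable', 'row-', ['cf:qual'], {})
    try:
        rows = client.scannerGetList(scanner_id, 100)
        while rows:
            for row in rows:
                print('%s: %r' % (row.row, row.columns))
            rows = client.scannerGetList(scanner_id, 100)
    finally:
        client.scannerClose(scanner_id)
        transport.close()
# ---------------------------------------------------------------------------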
/sasl-happybase-1.0.tar.gz/sasl-happybase-1.0/happybase/hbase/Hbase.py
<div align="center">
<img src="img/logo.png">
</div>
<div align="center">
[](https://github.com/sassoftware/sasoptpy/issues) <br>
[](https://github.com/sassoftware/sasoptpy/releases)
[](https://github.com/sassoftware/sasoptpy/releases)
[](https://github.com/sassoftware/sasoptpy/tags) <br>
[](https://github.com/sassoftware/sasoptpy/blob/master/LICENSE)
[](https://communities.sas.com/t5/Mathematical-Optimization/bd-p/operations_research)
[](https://mybinder.org/v2/gh/sassoftware/sasoptpy/master)
</div>
## Overview
sasoptpy is the Python interface for SAS Optimization and SAS/OR solvers. It enables developers to quickly formulate and solve mathematical optimization problems by using native Python data structures. sasoptpy works with both client-side and server-side data, and it allows concurrency, making it a great tool for working with both small and large projects.
## Features
- Supports several optimization problem types:
- Linear optimization (LP)
- Mixed integer linear optimization (MILP)
- Nonlinear optimization (NLP)
- Quadratic optimization (QP)
- Works with both client-side and server-side data
- Allows abstract modeling with run-time actions
- Supports workspaces, enabling you to run multiple problems concurrently
- Provides wrapper for tuning MILP solver parameters
## Flow
#### Concrete Model
<div align="center">
<img src="img/flow-animation-concrete.gif">
</div>
Using native Python functionality, you can model an optimization problem on the client and solve it in SAS Viya or SAS 9.4.
The problem is fully generated on the client side, and the computationally expensive part is handled by the optimization solver.
#### Abstract Model
<div align="center">
<img src="img/flow-animation-abstract.gif">
</div>
If you have the data available on the server, you can model an abstract problem and cut the model generation time significantly.
You can also benefit from solving several problems concurrently.
## Installation
You can install sasoptpy via PyPI, via Conda, or by cloning from the repository.
- PyPI
``` sh
pip install sasoptpy
```
- Conda
``` sh
conda install -c sas-institute sasoptpy
```
- Repository
``` sh
git clone https://github.com/sassoftware/sasoptpy.git
cd sasoptpy/
python3 setup.py install
```
## Examples
### 1. Team Selection Problem
<a href="#">
<img align="right" src="img/example_main.png">
</a>
In many team sports, such as soccer, basketball, and e-sports, a common task is to choose team members from the pool of available players. In the following example, consider a generic problem where the decision maker tries to sign three players from hundreds of candidates. The objective is to maximize the total rating of the team.
The problem summary is as follows:
- Data
- List of players along with their attributes, desired position(s), and contract price
- Budget limit
- Decision
- Choosing a player to sign for each position
- Constraints
- Total signing cost should not exceed the budget limit
- Players can play only their desired position
<div align="center">
<img src="img/squad_problem_table.png">
</div>
The **objective** is to maximize the team rating, which is defined as the quadratic sum of position-pair ratings.
<div align="center">
<img src="img/squad_problem.png">
</div>
<div align="center">
<img src="img/squad_problem_obj.png">
</div>
The [Jupyter notebook](https://github.com/sassoftware/sasoptpy/blob/master/examples/notebooks/TeamSelection.ipynb) shows how this problem is solved by using a simple linearization and the SAS Optimization MILP solver. [(nbviewer)](https://nbviewer.jupyter.org/github/sassoftware/sasoptpy/blob/master/examples/notebooks/TeamSelection.ipynb)
<div align="center">
<a href="https://nbviewer.jupyter.org/github/sassoftware/sasoptpy/blob/master/examples/notebooks/TeamSelection.ipynb">
<img src="img/squad_example.gif">
</a>
</div>
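To give a flavor of the modeling style, here is a minimal sketch of a simplified version of this problem. The player data and budget are made up, the `cas_conn` session object is assumed to exist, and the objective uses plain per-player ratings instead of the quadratic pair ratings that the notebook linearizes:

``` python
import sasoptpy as so

# Hypothetical data: player -> (position, price, rating)
players = {
    'p1': ('forward', 20, 88), 'p2': ('forward', 15, 84),
    'p3': ('midfield', 18, 86), 'p4': ('midfield', 12, 81),
    'p5': ('defense', 14, 83), 'p6': ('defense', 10, 79),
}
positions = ['forward', 'midfield', 'defense']
budget = 45

m = so.Model(name='team_selection', session=cas_conn)  # cas_conn: your CAS session
sign = m.add_variables(list(players), vartype=so.BIN, name='sign')
# Sign exactly one player for each position
m.add_constraints((so.expr_sum(sign[p] for p in players
                               if players[p][0] == pos) == 1
                   for pos in positions), name='one_per_position')
# Total signing cost must not exceed the budget limit
m.add_constraint(so.expr_sum(players[p][1] * sign[p] for p in players) <= budget,
                 name='budget')
m.set_objective(so.expr_sum(players[p][2] * sign[p] for p in players),
                sense=so.MAX, name='total_rating')
m.solve()
```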
### 2. Diet Problem
The diet problem, also known as the Stigler diet problem, is one of the earliest optimization problems in the literature. George J. Stigler originally posed the question of how to find the cheapest diet while satisfying the minimum nutritional requirements (Stigler 1945).
This well-known problem can be solved easily with linear optimization. Because the methodology had not yet been developed in 1937, Stigler solved the problem by using heuristics, and although his solution was not optimal, it missed the best solution by only 24 cents per year.
You can see how this problem can be written in terms of mathematical equations and fed into SAS Optimization solvers by using the modeling capabilities of the sasoptpy package in the [Jupyter notebook](https://github.com/sassoftware/sasoptpy/blob/master/examples/notebooks/DietProblem.ipynb). [(nbviewer)](https://nbviewer.jupyter.org/github/sassoftware/sasoptpy/blob/master/examples/notebooks/DietProblem.ipynb)
<div align="center">
<a href="https://nbviewer.jupyter.org/github/sassoftware/sasoptpy/blob/master/examples/notebooks/DietProblem.ipynb">
<img src="img/diet_example.gif">
</a>
</div>
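As with the team selection example, the core model fits in a few lines. The following sketch uses made-up foods, nutrient contents, and daily requirements, and again assumes a `cas_conn` session object:

``` python
import sasoptpy as so

# Hypothetical data: unit costs, nutrient contents, and daily requirements
cost = {'bread': 0.30, 'milk': 0.50, 'cheese': 1.20}
content = {('bread', 'calories'): 80, ('bread', 'protein'): 3,
           ('milk', 'calories'): 60, ('milk', 'protein'): 8,
           ('cheese', 'calories'): 110, ('cheese', 'protein'): 7}
requirement = {'calories': 2000, 'protein': 55}

m = so.Model(name='diet', session=cas_conn)  # cas_conn: your CAS session
amount = m.add_variables(list(cost), lb=0, name='amount')
# Meet every minimum nutritional requirement
m.add_constraints((so.expr_sum(content[f, n] * amount[f] for f in cost)
                   >= requirement[n] for n in requirement), name='nutrition')
# Minimize the total cost of the diet
m.set_objective(so.expr_sum(cost[f] * amount[f] for f in cost),
                sense=so.MIN, name='total_cost')
m.solve()
```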
<br>
<div align="center">
<a href="https://sassoftware.github.io/sasoptpy/examples/examples.html"><img src="img/more_examples.png"></a>
</div>
## Contributions
We welcome all contributions, including bug reports, new features, documentation fixes, performance enhancements, and new ideas.
If you have something to share, we accept pull requests on GitHub. See the [Contributing Instructions](CONTRIBUTING.md) for more details, and see the [Contributor Agreement](ContributorAgreement.txt) for our code of conduct.
## Tests
Unit tests are mainly intended for internal use. If the required environment variables are set, you can use `unittest` to check the health of a commit or to measure code coverage. See the [tests README](tests/README.md) for more details.
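For example, a standard discovery run from the repository root might look like this (the exact command can differ in your setup):

``` sh
python -m unittest discover -s tests
```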
## Documentation
The official documentation is hosted on GitHub Pages: https://sassoftware.github.io/sasoptpy/.
A PDF version is also available: https://sassoftware.github.io/sasoptpy/sasoptpy.pdf.
The documentation is automatically generated using [Sphinx](https://www.sphinx-doc.org/en/master/). All class, method, and function APIs are provided in the source code. The main structure can be found in the `doc` folder.
## License
This package is published under the Apache 2.0 license. See [LICENSE](LICENSE.md) for details.
---
Copyright © SAS Institute Inc.
/sasoptpy-1.0.5.tar.gz/sasoptpy-1.0.5/README.md
import sasoptpy as so
import pandas as pd
def test(cas_conn):
m = so.Model(name='farm_planning', session=cas_conn)
# Input Data
cow_data_raw = []
for age in range(12):
if age < 2:
row = {'age': age,
'init_num_cows': 10,
'acres_needed': 2/3.0,
'annual_loss': 0.05,
'bullock_yield': 0,
'heifer_yield': 0,
'milk_revenue': 0,
'grain_req': 0,
'sugar_beet_req': 0,
'labour_req': 10,
'other_costs': 50}
else:
row = {'age': age,
'init_num_cows': 10,
'acres_needed': 1,
'annual_loss': 0.02,
'bullock_yield': 1.1/2,
'heifer_yield': 1.1/2,
'milk_revenue': 370,
'grain_req': 0.6,
'sugar_beet_req': 0.7,
'labour_req': 42,
'other_costs': 100}
cow_data_raw.append(row)
cow_data = pd.DataFrame(cow_data_raw).set_index(['age'])
grain_data = pd.DataFrame([
['group1', 20, 1.1],
['group2', 30, 0.9],
['group3', 20, 0.8],
['group4', 10, 0.65]
], columns=['group', 'acres', 'yield']).set_index(['group'])
num_years = 5
num_acres = 200
bullock_revenue = 30
heifer_revenue = 40
dairy_cow_selling_age = 12
dairy_cow_selling_revenue = 120
max_num_cows = 130
sugar_beet_yield = 1.5
grain_cost = 90
grain_revenue = 75
grain_labour_req = 4
grain_other_costs = 15
sugar_beet_cost = 70
sugar_beet_revenue = 58
sugar_beet_labour_req = 14
sugar_beet_other_costs = 10
nominal_labour_cost = 4000
nominal_labour_hours = 5500
excess_labour_cost = 1.2
capital_outlay_unit = 200
num_loan_years = 10
annual_interest_rate = 0.15
max_decrease_ratio = 0.50
max_increase_ratio = 0.75
# Sets
AGES = cow_data.index.tolist()
init_num_cows = cow_data['init_num_cows']
acres_needed = cow_data['acres_needed']
annual_loss = cow_data['annual_loss']
bullock_yield = cow_data['bullock_yield']
heifer_yield = cow_data['heifer_yield']
milk_revenue = cow_data['milk_revenue']
grain_req = cow_data['grain_req']
sugar_beet_req = cow_data['sugar_beet_req']
cow_labour_req = cow_data['labour_req']
cow_other_costs = cow_data['other_costs']
YEARS = list(range(1, num_years+1))
YEARS0 = [0] + YEARS
# Variables
numCows = m.add_variables(AGES + [dairy_cow_selling_age], YEARS0, lb=0,
name='numCows')
for age in AGES:
numCows[age, 0].set_bounds(lb=init_num_cows[age],
ub=init_num_cows[age])
numCows[dairy_cow_selling_age, 0].set_bounds(lb=0, ub=0)
numBullocksSold = m.add_variables(YEARS, lb=0, name='numBullocksSold')
numHeifersSold = m.add_variables(YEARS, lb=0, name='numHeifersSold')
GROUPS = grain_data.index.tolist()
acres = grain_data['acres']
grain_yield = grain_data['yield']
grainAcres = m.add_variables(GROUPS, YEARS, lb=0, name='grainAcres')
for group in GROUPS:
for year in YEARS:
grainAcres[group, year].set_bounds(ub=acres[group])
grainBought = m.add_variables(YEARS, lb=0, name='grainBought')
grainSold = m.add_variables(YEARS, lb=0, name='grainSold')
sugarBeetAcres = m.add_variables(YEARS, lb=0, name='sugarBeetAcres')
sugarBeetBought = m.add_variables(YEARS, lb=0, name='sugarBeetBought')
sugarBeetSold = m.add_variables(YEARS, lb=0, name='sugarBeetSold')
numExcessLabourHours = m.add_variables(YEARS, lb=0,
name='numExcessLabourHours')
capitalOutlay = m.add_variables(YEARS, lb=0, name='capitalOutlay')
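    # Yearly payment per unit of capital borrowed, from the standard annuity
    # formula r*P / (1 - (1+r)**(-n)) with P = capital_outlay_unit,
    # r = annual_interest_rate, and n = num_loan_years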
yearly_loan_payment = (annual_interest_rate * capital_outlay_unit) /\
(1 - (1+annual_interest_rate)**(-num_loan_years))
# Objective function
revenue = {year:
bullock_revenue * numBullocksSold[year] +
heifer_revenue * numHeifersSold[year] +
dairy_cow_selling_revenue * numCows[dairy_cow_selling_age,
year] +
so.expr_sum(milk_revenue[age] * numCows[age, year]
for age in AGES) +
grain_revenue * grainSold[year] +
sugar_beet_revenue * sugarBeetSold[year]
for year in YEARS}
cost = {year:
grain_cost * grainBought[year] +
sugar_beet_cost * sugarBeetBought[year] +
nominal_labour_cost +
excess_labour_cost * numExcessLabourHours[year] +
so.expr_sum(cow_other_costs[age] * numCows[age, year]
for age in AGES) +
so.expr_sum(grain_other_costs * grainAcres[group, year]
for group in GROUPS) +
sugar_beet_other_costs * sugarBeetAcres[year] +
so.expr_sum(yearly_loan_payment * capitalOutlay[y]
for y in YEARS if y <= year)
for year in YEARS}
profit = {year: revenue[year] - cost[year] for year in YEARS}
totalProfit = so.expr_sum(profit[year] -
yearly_loan_payment * (num_years - 1 + year) *
capitalOutlay[year] for year in YEARS)
m.set_objective(totalProfit, sense=so.MAX, name='totalProfit')
# Constraints
m.add_constraints((
so.expr_sum(acres_needed[age] * numCows[age, year] for age in AGES) +
so.expr_sum(grainAcres[group, year] for group in GROUPS) +
sugarBeetAcres[year] <= num_acres
for year in YEARS), name='num_acres')
m.add_constraints((
numCows[age+1, year+1] == (1-annual_loss[age]) * numCows[age, year]
for age in AGES if age != dairy_cow_selling_age
for year in YEARS0 if year != num_years), name='aging')
m.add_constraints((
numBullocksSold[year] == so.expr_sum(
bullock_yield[age] * numCows[age, year] for age in AGES)
for year in YEARS), name='numBullocksSold_def')
m.add_constraints((
numCows[0, year] == so.expr_sum(
heifer_yield[age] * numCows[age, year]
for age in AGES) - numHeifersSold[year]
for year in YEARS), name='numHeifersSold_def')
m.add_constraints((
so.expr_sum(numCows[age, year] for age in AGES) <= max_num_cows +
so.expr_sum(capitalOutlay[y] for y in YEARS if y <= year)
for year in YEARS), name='max_num_cows_def')
grainGrown = {(group, year): grain_yield[group] * grainAcres[group, year]
for group in GROUPS for year in YEARS}
m.add_constraints((
so.expr_sum(grain_req[age] * numCows[age, year] for age in AGES) <=
so.expr_sum(grainGrown[group, year] for group in GROUPS)
+ grainBought[year] - grainSold[year]
for year in YEARS), name='grain_req_def')
sugarBeetGrown = {(year): sugar_beet_yield * sugarBeetAcres[year]
for year in YEARS}
m.add_constraints((
so.expr_sum(sugar_beet_req[age] * numCows[age, year] for age in AGES)
<=
sugarBeetGrown[year] + sugarBeetBought[year] - sugarBeetSold[year]
for year in YEARS), name='sugar_beet_req_def')
m.add_constraints((
so.expr_sum(cow_labour_req[age] * numCows[age, year]
for age in AGES) +
so.expr_sum(grain_labour_req * grainAcres[group, year]
for group in GROUPS) +
sugar_beet_labour_req * sugarBeetAcres[year] <=
nominal_labour_hours + numExcessLabourHours[year]
for year in YEARS), name='labour_req_def')
m.add_constraints((profit[year] >= 0 for year in YEARS), name='cash_flow')
m.add_constraint(so.expr_sum(numCows[age, num_years] for age in AGES
if age >= 2) /
sum(init_num_cows[age] for age in AGES if age >= 2) ==
[1-max_decrease_ratio, 1+max_increase_ratio],
name='final_dairy_cows_range')
res = m.solve()
if res is not None:
so.pd.display_all()
print(so.get_solution_table(numCows))
revenue_df = so.dict_to_frame(revenue, cols=['revenue'])
cost_df = so.dict_to_frame(cost, cols=['cost'])
profit_df = so.dict_to_frame(profit, cols=['profit'])
print(so.get_solution_table(numBullocksSold, numHeifersSold,
capitalOutlay, numExcessLabourHours,
revenue_df, cost_df, profit_df))
gg_df = so.dict_to_frame(grainGrown, cols=['grainGrown'])
print(so.get_solution_table(grainAcres, gg_df))
        sbg_df = so.dict_to_frame(sugarBeetGrown, cols=['sugarBeetGrown'])
print(so.get_solution_table(
grainBought, grainSold, sugarBeetAcres,
sbg_df, sugarBeetBought, sugarBeetSold))
num_acres = m.get_constraint('num_acres')
na_df = num_acres.get_expressions()
max_num_cows_con = m.get_constraint('max_num_cows_def')
mnc_df = max_num_cows_con.get_expressions()
print(so.get_solution_table(na_df, mnc_df))
return m.get_objective_value()
/sasoptpy-1.0.5.tar.gz/sasoptpy-1.0.5/examples/client_side/farm_planning.py
import sasoptpy as so
import random
def test(cas_conn, **kwargs):
# Data generation
n = 80
p = 0.02
random.seed(1)
ARCS = {}
for i in range(0, n):
for j in range(0, n):
if random.random() < p:
ARCS[i, j] = random.random()
max_length = 10
# Model
model = so.Model("kidney_exchange", session=cas_conn)
# Sets
NODES = set().union(*ARCS.keys())
MATCHINGS = range(1, int(len(NODES)/2)+1)
# Variables
UseNode = model.add_variables(NODES, MATCHINGS, vartype=so.BIN,
name="usenode")
UseArc = model.add_variables(ARCS, MATCHINGS, vartype=so.BIN,
name="usearc")
Slack = model.add_variables(NODES, vartype=so.BIN, name="slack")
print('Setting objective...')
# Objective
model.set_objective(so.expr_sum((ARCS[i, j] * UseArc[i, j, m]
for [i, j] in ARCS for m in MATCHINGS)),
name="total_weight", sense=so.MAX)
print('Adding constraints...')
# Constraints
Node_Packing = model.add_constraints((UseNode.sum(i, '*') + Slack[i] == 1
for i in NODES), name="node_packing")
Donate = model.add_constraints((UseArc.sum(i, '*', m) == UseNode[i, m]
for i in NODES
for m in MATCHINGS), name="donate")
Receive = model.add_constraints((UseArc.sum('*', j, m) == UseNode[j, m]
for j in NODES
for m in MATCHINGS), name="receive")
Cardinality = model.add_constraints((UseArc.sum('*', '*', m) <= max_length
for m in MATCHINGS),
name="cardinality")
# Solve
model.solve(options={'with': 'milp', 'maxtime': 300}, **kwargs)
# Define decomposition blocks
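    # Constraints indexed by matching m couple only variables of that matching,
    # so assigning the donate/receive/cardinality rows of matching m to block
    # m-1 lets the decomposition method solve one subproblem per matching.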
for i in NODES:
for m in MATCHINGS:
Donate[i, m].set_block(m-1)
Receive[i, m].set_block(m-1)
for m in MATCHINGS:
Cardinality[m].set_block(m-1)
model.solve(options={
'with': 'milp', 'maxtime': 300, 'presolver': 'basic',
'decomp': {'method': 'user'}}, **kwargs)
return model.get_objective_value()
/sasoptpy-1.0.5.tar.gz/sasoptpy-1.0.5/examples/client_side/sas_kidney_exchange.py
import gym
import grpc
import numpy as np
from concurrent import futures
import argparse
from sasrl_env.common.env_pb2 import Info, Observation, Transition, Action, Empty, RenderOut, MetaData, \
StepInfoKVInt, StepInfoKVString, StepInfoKVFloat, StepInfoKVBool, StepInfo
from sasrl_env.common.env_pb2_grpc import EnvServicer as Service, \
add_EnvServicer_to_server as register
from sasrl_env.utils.utils import get_ip, get_space_message, serialize_data, deserialize_data
from sasrl_env.common.utils import get_logger
from sasrl_env.common.wrapper import Monitor
# gym packages to import
import pkg_resources
installed_packages = pkg_resources.working_set
installed_packages_list = sorted(["%s==%s" % (i.key, i.version)
for i in installed_packages
if (i.key.startswith('gym') or i.key.endswith('gym'))])
modules = sorted(["%s" % i.key.replace('-', '_')
for i in installed_packages
if ((i.key.startswith('gym') or i.key.endswith('gym')) and (i.key != 'gym'))])
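# Import every installed package whose name starts or ends with 'gym' (except
# gym itself) so that any environment ids they register on import become
# resolvable through gym.make().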
for library in modules:
try:
exec("import {module}".format(module=library))
except Exception as e:
print(e)
logger = get_logger(log_level='debug')
def get_observation_m(observation):
"""
Creates the observation message
@param observation: observation which can be a scalar, numpy array, list, or dict
@return: observation message
"""
# serialize observations in a 1d numpy array
observation = serialize_data(observation)
# create the observation message
if np.issubdtype(observation.dtype, np.floating):
observation_m = Observation(data_f=observation)
elif np.issubdtype(observation.dtype, np.integer):
observation_m = Observation(data_i=observation)
else:
raise Exception('Observation should have a type int or float.')
return observation_m
def get_info_m(info: dict) -> StepInfo:
info_m = StepInfo()
for k, v in info.items():
if isinstance(v, str):
info = StepInfoKVString(key=k, value=v)
info_m.data_str.append(info)
elif isinstance(v, bool):
info = StepInfoKVBool(key=k, value=v)
info_m.data_bool.append(info)
elif isinstance(v, int):
info = StepInfoKVInt(key=k, value=v)
info_m.data_int.append(info)
elif isinstance(v, float):
info = StepInfoKVFloat(key=k, value=v)
info_m.data_float.append(info)
return info_m
def decode_action(action_m, action_space):
    if action_m.data_i:
        action = deserialize_data(action_m.data_i, action_space)
    elif action_m.data_f:
        action = deserialize_data(action_m.data_f, action_space)
    else:
        raise Exception("Server received no action data.")
    return action
class Env(Service):
def __init__(self, port):
self.port = port
super(Env, self).__init__()
def Handshake(self, empty, _):
"""
        Sets the metadata for the environment server. This metadata includes the version of the
        sasrl_env package and is used to check that tkrl and sasrl_env are consistent.
@param empty:
@return: the metadata message which includes the version number
"""
# set the version manually
version = "1.2.0"
return MetaData(EnvVersion=version)
def Make(self, name_m, _):
"""
This function creates an environment instance on remote environment server.
@param name_m: name of the environment
@return: information message which includes observation_space, action_space and episode
length of the environment
"""
name = name_m.data
if not hasattr(self, 'env') or self.env.spec.id != name:
self.env = gym.make(name)
self.env = Monitor(self.env)
logger.info('Env {} created at port {}'.format(name, str(self.port)))
# wrap atari environments
if 'atari' in name_m.wrapper:
from sasrl_env.common.atari_wrappers import wrap_atari
self.env = wrap_atari(self.env)
# check validity of observation_space
try:
self.env.observation_space
except AttributeError:
raise AttributeError(
'Environment should have an observation_space object. Use either Box, Discrete, Tuple or Dict space '
'types to define the observation space.')
observation_space_m = get_space_message(self.env.observation_space)
# check validity of action_space
try:
self.env.action_space
except AttributeError:
raise AttributeError('Environment should have an action_space object. Use either Box, '
'Discrete, Tuple or Dict space types to define the action space.')
action_space_m = get_space_message(self.env.action_space)
try:
_max_episode_steps = self.env._max_episode_steps
except AttributeError:
_max_episode_steps = None
return Info(observation_space=observation_space_m,
action_space=action_space_m,
max_episode_steps=_max_episode_steps)
def Reset(self, empty_m, _):
"""
This function resets the environment and returns the encoded observation message.
@param empty_m: empty message
@return: 1 dimensional encoded observation and an info dict
"""
next_observation, info = self.env.reset()
next_observation = get_observation_m(next_observation)
info = get_info_m(info)
return Transition(next_observation=next_observation,
info=info)
def Step(self, action_m, _):
"""
This functions runs a step in the environment according to the received action message.
@param action_m: the action message
@return: the transition message which includes next observations, reward and terminal signal
"""
action = decode_action(action_m, self.env.action_space)
try:
next_observation, reward, done, info = self.env.step(action)
except TypeError:
next_observation, reward, done, info = self.env.step(action.tolist())
next_observation = get_observation_m(next_observation)
info = get_info_m(info)
return Transition(next_observation=next_observation,
reward=reward,
done=done,
info=info)
def Render(self, rendermode_m, _):
"""
Renders the environment if the .render() function of environment is implemented.
@param rendermode_m: the type of render. It can be 'rgb_array', ansi', or 'human'
@return: render message
"""
res = self.env.render(rendermode_m.data)
mode = rendermode_m.data
if mode == 'rgb_array':
reno = RenderOut(rgb_array=res.flatten())
elif mode == 'ansi':
reno = RenderOut(ansi=res)
elif mode == 'human':
reno = RenderOut()
else:
raise Exception("render mode {} not supported.".format(mode))
return reno
def Sample(self, empty_m, _):
"""
Samples an action from environment.
@param empty_m:
@return: the action message sampled from environment
"""
        action = np.asarray(self.env.action_space.sample())
        if np.issubdtype(action.dtype, np.floating):
            action_m = Action(data_f=action.ravel())
        elif np.issubdtype(action.dtype, np.integer):
            action_m = Action(data_i=action.ravel())
        else:
            raise Exception("Sampling was unsuccessful due to unsupported data type.")
return action_m
def Close(self, empty_m, _):
"""
Closes the environment
@param empty_m: empty message
@return: empty message
"""
self.env.close()
return Empty()
def Seed(self, env_seed_m, _):
"""
Sets seed of the environment.
@param env_seed_m: the seed message
@return: empty message
"""
if hasattr(self.env, 'seed'):
self.env.seed(env_seed_m.data)
else:
logger.warning("There is no function to set seed in the environment.")
return Empty()
class register_server(object):
"""
    This class is responsible for assigning the environment server to the specified port.
"""
def start(self, port):
host = get_ip()
address = '{}:{}'.format(host, port)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
register(Env(port), server)
server.add_insecure_port(address)
server.start()
logger.info("Started env server at: {}".format(address))
server.wait_for_termination()
return 0
def start(port):
"""
Starts an environment server on the assigned port
@param port:
@return: status
"""
rs = register_server()
rs.start(port)
return 0
if __name__ == '__main__':
# this python file can be executed directly or via the environment controller
parser = argparse.ArgumentParser('environment server')
parser.add_argument('--port', '-p', type=int, default=10007,
help='the port number which hosts a single environment server.')
args = parser.parse_args()
# start a single environment server
start(args.port)
|
/sasrl_env-1.2.0-py3-none-any.whl/sasrl_env/serverSingle.py
| 0.632276 | 0.263798 |
serverSingle.py
|
pypi
|
import grpc
import numpy as np
from sasrl_env.common.env_pb2 import Action, Empty, Name, StepInfo, StepInfoKV
from sasrl_env.common.env_pb2_grpc import EnvStub
from sasrl_env.utils.utils import decode_space_message, serialize_data, deserialize_data
def decode_observation(observation_m, observation_space):
if observation_m.data_i:
observation = deserialize_data(observation_m.data_i, observation_space)
elif observation_m.data_f:
observation = deserialize_data(observation_m.data_f, observation_space)
else:
raise Exception("Client received no observation data.")
return observation
def decode_step_info(info_m: StepInfo):
info = {}
for info_m_kv in info_m.data_int:
info[info_m_kv.key] = info_m_kv.value
for info_m_kv in info_m.data_str:
info[info_m_kv.key] = info_m_kv.value
for info_m_kv in info_m.data_bool:
info[info_m_kv.key] = info_m_kv.value
for info_m_kv in info_m.data_float:
info[info_m_kv.key] = info_m_kv.value
return info
def get_action_m(action):
"""
Creates the action message
@param action: action which can be a scalar, numpy array, list, or dict
@return: action message
"""
# convert action to 1d numpy array
action = serialize_data(action)
# create the action message
if np.issubdtype(action.dtype, np.floating):
action_m = Action(data_f=action)
elif np.issubdtype(action.dtype, np.integer):
action_m = Action(data_i=action)
else:
raise Exception('Action should be a numpy array of type int or float.')
return action_m
class Env(object):
def __init__(self, name, address, wrapper=None):
self.channel = grpc.insecure_channel(address)
self.env = EnvStub(self.channel)
self.make(name, wrapper)
def make(self, name, wrapper=None):
info = self.env.Make(Name(data=name, wrapper=wrapper))
self.observation_space = decode_space_message(info.observation_space)
self.action_space = decode_space_message(info.action_space)
self._max_episode_steps = info.max_episode_steps
def reset(self):
transition = self.env.Reset(Empty())
next_observation = decode_observation(transition.next_observation, self.observation_space)
info = decode_step_info(transition.info)
        return next_observation, info
def step(self, action):
action_m = get_action_m(action)
transition = self.env.Step(action_m)
next_observation = decode_observation(transition.next_observation, self.observation_space)
info = decode_step_info(transition.info)
return next_observation, transition.reward, transition.done, info
def Sample(self):
action = self.env.Sample(Empty())
return action
def Close(self):
self.env.Close()
def close(self):
self.channel.close()
if __name__ == '__main__':
import time
host = '10.122.32.31'
port = '10007'
address = '{}:{}'.format(host, port)
env_names = [
# 'CartPole-v0', 'MountainCar-v0', 'MountainCarContinuous-v0', 'Pendulum-v1',
# 'Acrobot-v1', 'LunarLander-v2', 'LunarLanderContinuous-v2', 'BipedalWalker-v3',
# 'BipedalWalkerHardcore-v3', 'Blackjack-v1', 'FrozenLake-v1', 'FrozenLake8x8-v1', 'CliffWalking-v0',
# 'Taxi-v3'
]
for game in [
'adventure',
# 'air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
# 'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
# 'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
# 'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
# 'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
# 'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
# 'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
# 'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
# 'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon'
]:
for obs_type in ['image', 'ram']:
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
env_names.append('{}-v0'.format(name))
# env_names_with_error = ['CarRacing-v0' #rendering error]
# env_names = env_names[-1:]
# env_names = env_names[12:13]
for env_name in env_names:
print(env_name)
env = Env(env_name, address)
st = time.time()
cnt = 0
for i in range(1):
s, info = env.reset()
cnt += 1
j = 0
done = False
while not done:
if env_name == 'LunarLanderContinuous-v2':
action = [1, 1]
elif env_name == 'BipedalWalker-v3' or env_name == 'BipedalWalkerHardcore-v3':
action = [1, 1, 1, 1]
else:
action = 1
ns, r, done, info = env.step(action)
cnt += 1
j += 1
s = ns
# report
if cnt % 100 == 0:
print("cnt: {} -- rate: {}".format(cnt, cnt / (time.time() - st)))
if j>200:
break
if cnt>1000:
break
env.close()
|
/sasrl_env-1.2.0-py3-none-any.whl/sasrl_env/client.py
| 0.573678 | 0.312567 |
client.py
|
pypi
|
import time
import gym
# This class is modified from the SB3 to monitor the env statistics
# https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/monitor.py
class Monitor(gym.Wrapper):
"""
    A monitor wrapper for Gym environments; it is used to record the episode reward, length, time and other data.
:param env: The environment
:param allow_early_resets: allows the reset of the environment before it is done
:param reset_keywords: extra keywords for the reset call,
if extra parameters are needed at reset
:param info_keywords: extra information to log, from the information return of env.step()
"""
def __init__(
self,
env: gym.Env,
allow_early_resets: bool = True,
reset_keywords=(),
info_keywords=(),
):
super(Monitor, self).__init__(env=env)
self.t_start = time.time()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_returns = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
"""
Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True
        :param kwargs: Extra keywords saved for the next episode, only if defined by reset_keywords
:return: the first observation of the environment
"""
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, "
"wrap your env with Monitor(env, path, allow_early_resets=True)"
)
self.rewards = []
self.needs_reset = False
for key in self.reset_keywords:
value = kwargs.get(key)
if value is None:
raise ValueError(f"Expected you to pass keyword argument {key} into reset")
self.current_reset_info[key] = value
return self.env.reset(**kwargs)
def step(self, action):
"""
Step the environment with the given action
:param action: the action
:return: observation, reward, done, information
"""
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
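        # Newer Gym API: step() returns (obs, reward, terminated, truncated, info);
        # the two done flags are collapsed into a single `done` flag below.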
observation, reward, done_terminated, done_truncated, info = self.env.step(action)
self.rewards.append(reward)
        done = done_terminated or done_truncated
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
ep_len = len(self.rewards)
info["ep_rew"] = ep_rew
info["ep_len"] = ep_len
info["ep_t"] = round(time.time() - self.t_start, 6)
info["real_done"] = True
self.episode_returns.append(ep_rew)
self.episode_lengths.append(ep_len)
self.episode_times.append(time.time() - self.t_start)
self.total_steps += 1
return observation, reward, done, info
def close(self) -> None:
"""
Closes the environment
"""
super(Monitor, self).close()
def get_total_steps(self) -> int:
"""
Returns the total number of timesteps
:return:
"""
return self.total_steps
def get_episode_rewards(self):
"""
Returns the rewards of all the episodes
:return:
"""
return self.episode_returns
def get_episode_lengths(self):
"""
Returns the number of timesteps of all the episodes
:return:
"""
return self.episode_lengths
def get_episode_times(self):
"""
Returns the runtime in seconds of all the episodes
:return:
"""
return self.episode_times
|
/sasrl_env-1.2.0-py3-none-any.whl/sasrl_env/common/wrapper.py
| 0.840161 | 0.479321 |
wrapper.py
|
pypi
|
import pandas as pd

def attColorScale(numOfShades, up, attCount):
if attCount == numOfShades:
return False
if attCount == 0:
return True
return up
"""If the color shading is normal (up == True) it proceed to shade from lighter to darker colors, otherwise it follows a backward shade:"""
def selectAttColor(color, shadingIntensity, up):
if up:
return color - int(shadingIntensity,16)
else:
return color + int(shadingIntensity,16)
"""Add or reduce the attribute counter according to the shade order we are following:"""
def setCounter(attCount, up):
if up:
return attCount + 1
else:
return attCount - 1
"""Select an hexadecimal value for the starting color of the dataframe shade (the lighter one):"""
def startingColorSel(color,loneAttribute):
if color == 'blue':
if loneAttribute:
return 'E2EDF8'
else:
return 'EBF3FA'
if color == 'gray':
if loneAttribute:
            return 'BFBFBF'
else:
return 'CCCCCC'
"""Select the intensity of the shades between the attributes of the dataframe:"""
def shadingIntensitySel(color,loneAttribute):
if color == 'blue':
if loneAttribute:
return '3A230E'
else:
return 'A0602'
if color == 'gray':
if loneAttribute:
return '191919'
else:
return 'D0D0D'
"""### <font color=#85C1E9>Text formatting</font>
Paints <font color=red>FALSE</font> values in <font color=red>red</font> and <font color=green>TRUE</font> values in <font color=green>green</font> text:
"""
def color_true_false(value):
if type(value) != type(''):
if value == 1:
color = 'green'
else:
color = 'red'
return 'color: {}'.format(color)
"""Paints values that are equal to 1 with <font color=green>green</font> text:"""
def color_val(value):
    if type(value) != type(''):
        if value == 1:
            return 'color: green'
"""Paints value that are equal to 1.0 with <font color=#FDFEFE>white</font> text:"""
def color_white(value):
if type(value) != type(''):
if value == 1.0:
color = '#FDFEFE'
return 'color: {}'.format(color)
"""### <font color=#85C1E9>Row formatting</font>
Automatically shades the rows of the table with a <font color=red>c<font color=orange>o<font color=yellow>l<font color=green>o<font color=blue>r<font color=blue> s<font color=green>c<font color=yellow>a<font color=orange>l<font color=red>e</font></font></font></font></font></font></font></font></font></font> for each *attribute_name* group that was defined during the preprocessing phase.
Works differently depending on whether we want to focus on a single attribute (the sensitive one) or on the entire dataframe.
Parameter meanings:
- `tab` $\rightarrow$ the dataframe to be formatted (pandas df)
- `startingColor` $\rightarrow$ the lighter color to start the shade process (hexadecimal color)
- `NumOfShades` $\rightarrow$ the number of shades to apply (integer)
- `shadingIntensity` $\rightarrow$ the intensity between each shade (hexadecimal color)
- `loneAttribute` $\rightarrow$ indicates if the analysis is required only on a sensitive attribute (boolean)
"""
def highlight(tab, startingColor, numOfShades, shadingIntensity, loneAttribute):
df = pd.DataFrame('', index=tab.index, columns=tab.columns)
color = int(startingColor,16)
attCount = 0
up = True
if loneAttribute:
for att in tab['attribute_name']:
bgcolor = 'background-color: #' + str(format(color, 'X'))
df.iloc[attCount, :] = bgcolor
up = attColorScale(numOfShades-1, up, attCount)
color = selectAttColor(color, shadingIntensity, up)
attCount = setCounter(attCount, up)
else:
for att in tab['attribute_name'].unique():
mask = tab['attribute_name'] == att
bgcolor = 'background-color: #' + str(format(color, 'X'))
df.loc[mask, :] = bgcolor
up = attColorScale(numOfShades-1, up, attCount)
color = selectAttColor(color, shadingIntensity, up)
attCount = setCounter(attCount, up)
return df
"""Apply a <font color=green>green</font> background to cells containing a *1.0* value:"""
def highlight_100(value):
if type(value) != type(''):
if value == 1.0:
return 'background-color: #16A085'
"""---
Apply a background gradient to each column of the table ***tab*** belonging to the ***sel_col*** list (using `axis=0` along the rows or using `axis=None` along both axes), and format their values to two decimals, also applying percentage formatting:
"""
def table_gradient(tab,tab_style,sel_col,loneAttribute):
if sel_col:
tab_style.format({col: "{:.2%}" for col in tab.columns if col in sel_col})
if loneAttribute:
tab_style.background_gradient(axis=None, subset = sel_col)
else:
tab_style.background_gradient(axis=0, subset = sel_col)
"""### <font color=#85C1E9>Table formatting</font>
Formats text and background for each row of the dataframe:
1. Set <font color=black>black</font> text as default
2. Set <font color=navy>navy</font> text to values belonging to the *attribute_value* column
3. Set a sequence of <font color=lightblue>light blue</font> shades as background for each defined group
4. Add a gradient to the columns belonging to *sel_col* list
5. Set a <font color=green>green</font> background to the cells containing 100.00%
6. Set <font color=#FDFEFE>white</font> text to the cells containing 100.00%
"""
def format_blue_table(tab, sel_col, numOfShades, loneAttribute):
tab_style = tab.style
startingColor = startingColorSel('blue',loneAttribute)
shadingIntensity = shadingIntensitySel('blue',loneAttribute)
tab_style.set_properties(**{'color': 'black'})
tab_style.set_properties(**{'color': 'navy'}, subset = ['attribute_value'])
tab_style.apply(highlight, startingColor = startingColor, numOfShades = numOfShades, shadingIntensity = shadingIntensity, loneAttribute = loneAttribute, axis = None)
table_gradient(tab,tab_style,sel_col,loneAttribute)
tab_style.applymap(highlight_100, subset = sel_col)
tab_style.applymap(color_white, subset = sel_col)
return tab_style
"""Formats text and background for each row of the dataframe:
1. Set <font color=black>black</font> text for the values belonging to the *attribute_name* column
2. Set <font color=navy>navy</font> text to values belonging to the *attribute_value* column
3. Set a sequence of <font color=gray>gray</font> shades as background for each defined group
4. Set red and green colored text for cells containing False and True, respectively
5. Apply a <font color=red>gradient</font> to the columns belonging to the *sel_col* list
6. Set <font color=green>green</font> colored text of the cells containing '1'
"""
def format_gray_table(tab,sel_col, numOfShades, loneAttribute):
tab_style = tab.style
startingColor = startingColorSel('gray',loneAttribute)
shadingIntensity = shadingIntensitySel('gray',loneAttribute)
tab_style.set_properties(**{'color': 'black'}, subset = ['attribute_name'])
if 'attribute_value' in tab.columns:
tab_style.set_properties(**{'color': 'navy'}, subset = ['attribute_value'])
tab_style.apply(highlight, startingColor = startingColor, numOfShades = numOfShades, shadingIntensity = shadingIntensity, loneAttribute = loneAttribute, axis = None)
tab_style.applymap(color_true_false)
if loneAttribute:
tab_style.text_gradient(axis=1, cmap='gist_heat', subset = sel_col, low=0, high=0.5)
else:
tab_style.text_gradient(axis=0, cmap='gist_heat', subset = sel_col, low=0, high=0.5)
tab_style.applymap(color_val)
return tab_style
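# Minimal usage sketch (hypothetical data, not part of the original notebook):
# build a small fairness-style table and render it with the blue formatter.
if __name__ == "__main__":
    demo = pd.DataFrame({
        'attribute_name': ['sex', 'sex', 'age', 'age'],
        'attribute_value': ['male', 'female', '<30', '>=30'],
        'accuracy': [0.91, 0.88, 1.0, 0.76],
    })
    styled = format_blue_table(demo, sel_col=['accuracy'],
                               numOfShades=2, loneAttribute=False)
    # In a notebook the returned Styler renders as an HTML table with shaded
    # row groups; the background gradient requires matplotlib to be installed.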
|
/sassss-0.0.1-py3-none-any.whl/src/DFFormatter/dataframeStyler.py
| 0.715126 | 0.614163 |
dataframeStyler.py
|
pypi
|
# sassy
[](https://travis-ci.org/jmsmistral/sassy)
A simple but powerful templating language for text interpolation, inspired by SAS macros.
## How it works
**Sassy** is a command-line program that **interpolates text** by interpreting a set of pre-defined **tags** that control text replacement and loops, amongst other things.
- A tag starts with a `%` and ends with a `;`.
- Everything that is not a tag, or within open/close tags is interpreted as plaintext and is left untouched.
Below is a description of each tag, with examples.
### Macros
- Macros are named blocks of text enclosed within `%macro` and `%mend` tags, that are interpolated when executed. A macro can be executed at any point after it is defined
- A macro looks a lot like a function, and must be given a name and a list of parameters within parentheses (empty for no parameters): `%macro <name>(<param>, ...);`
- Macros are executed with the `%exec` tag, referencing the macro name, and passing the required parameters (if any)
- When run, a macro interpolates the parameters and tags within its body, and outputs the resulting string
- Macros currently accept a maximum of 25 parameters
***test.txt***
```
%macro testMacro(param1);
This text will show whenever the macro is called.
We can reference macro parameters like this: &param1.
%mend;
Here's how you call a macro:
%exec testMacro(1);
Here's a call to the same macro with a different parameter:
%exec testMacro(a parameter can contain spaces);
```
Running `sassy test.txt` will generate the following...
```
Here's how you call a macro:
This text will show whenever the macro is called.
We can reference macro parameters like this: 1
Here's a call to the same macro with a different parameter:
This text will show whenever the macro is called.
We can reference macro parameters like this: a parameter can contain spaces
```
### Variables
- Variables are named references to strings, which can be used within macros, and loop blocks
- Variables are declared using the `%let` tag, as follows: `%let <name> = <value>;`
- The value includes everything after the equals sign `=`
- Variables can be referenced by wrapping the variable name within `&` and `.`, e.g. `&<name>.`
- Macro parameters are referenced in the same way as variables, `&<param>.`
- Variable references can be nested (see example below)
***test.txt***
```
%let var1 = some value;
%let var2 = some other value;
%let varNum =2;
%macro testMacro(param1);
Here's how you reference variables: &var1.
Macro parameters are referenced in the same way as variables: &param1.
Variables and parameters can be nested to dynamically compose references to other variables: &var&varNum..
%mend;
Here's what that looks like:
%exec testMacro(1);
%exec testMacro(2);
```
Running `sassy test.txt` will generate the following...
```
Here's what that looks like:
Here's how you reference variables: some value
Macro parameters are referenced in the same way as variables: 1
Variables and parameters can be nested to dynamically compose references to other variables: some other value
Here's how you reference variables: some value
Macro parameters are referenced in the same way as variables: 2
Variables and parameters can be nested to dynamically compose references to other variables: some other value
```
### Loops
- Loops are blocks of text enclosed within `%procloop` and `%pend` tags, that are interpolated multiple times in succession
- Unlike macros, loops do not have names and are interpolated in-place
- A loop is declared as follows: `%procloop (<integer>) <counter_name>;`
- `<integer>` - Defines the number of times the loop will execute. This can also be a reference to a variable
- `<counter_name>` - The name given to the loop counter, which can be referenced within the loop body as a variable
- The loop counter is zero-based
***test.txt***
```
This is how you execute a loop:
%procloop (3) loopCounter;
This loop will execute &loopCounter. times.
%pend;
You can also use a variable to set the number of iterations:
%let loopVar0 = first loop;
%let loopVar1 = second loop;
%let loopVar2 = third loop;
%let loopVar3 = fourth loop;
%let numLoops = 4;
%procloop (&numLoops.) counterVar;
This other loop will execute &counterVar. times, and references a different variable each time: &loopVar&counterVar..
%pend;
```
Running `sassy test.txt` will generate the following...
```
This is how you execute a loop:
This loop will execute 0 times.
This loop will execute 1 times.
This loop will execute 2 times.
You can also use a variable to set the number of iterations:
This other loop will execute 0 times, and references a different variable each time: first loop
This other loop will execute 1 times, and references a different variable each time: second loop
This other loop will execute 2 times, and references a different variable each time: third loop
This other loop will execute 3 times, and references a different variable each time: fourth loop
```
---
## Installation
Here's what you need to do to install sassy:
### Python 3.6+
Sassy is compatible with **Python 3.6 and later**.
On Unix systems, install Python 3.6 (or later) via your package manager (apt, rpm, yum, brew).
Alternatively, you can download an installation package from the [official Python downloads page](https://www.python.org/downloads/)
### Virtual Environment
It is recommended to put all project dependencies into their own virtual
environment - this way we don't pollute the global Python installation.
For this we recommend you use **virtualenvwrapper**. Follow the instructions
[here](http://virtualenvwrapper.readthedocs.io/en/latest/install.html)
to get this installed. Once you have virtualenvwrapper installed, create
a new virtual environment with:
```bash
mkvirtualenv sassy
workon sassy
```
Now let's install sassy:
```bash
pip install sassylang
```
### Get help or give help
- Open a new [issue](https://github.com/jmsmistral/sassy/issues/new) if you encounter a problem.
- Pull requests are welcome. You can help with language features!
---
## License
- sassy is Free Software and licensed under the [GPLv3](https://github.com/jmsmistral/macrosql/blob/master/LICENSE.txt)
- Main author is [@jmsmistral](https://github.com/jmsmistral)
|
/sassylang-0.0.5.tar.gz/sassylang-0.0.5/README.md
| 0.832679 | 0.959837 |
README.md
|
pypi
|
import numpy as np
class Transform(object):
name='Transform base class'
def __init__(self):
pass
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
raise NotImplementedError('Transform is an abstract class. Please subclass it and override the do_transform method.')
def __call__(self,*args,**kwargs):
return self.do_transform(*args,**kwargs)
def xlabel(self,unit=u'1/\xc5'):
return u'q (%s)' %unit
def ylabel(self,unit=u'1/cm'):
return u'y (%s)' % unit
def __str__(self):
return self.name
__unicode__=__str__
def __repr__(self):
return '<Transform (%s)>'%self.name
class TransformGuinier(Transform):
_factory_arguments=[(0,),(1,),(2,)]
name='Guinier'
def __init__(self,qpower=0):
self._qpower=qpower
if qpower==0:
self.name='Guinier (ln I vs. q)'
elif qpower==1:
self.name='Guinier cross-section (ln Iq vs. q)'
elif qpower==2:
self.name='Guinier thickness (ln Iq^2 vs. q)'
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
d=kwargs
d['x']=np.power(x,2)
d['y']=np.log(y*np.power(x,self._qpower))
if dy is not None:
d['dy']=np.absolute(dy/y)
if dx is not None:
d['dx']=2*np.absolute(x)*dx
return d
class TransformLogLog(Transform):
name=''
_factory_arguments=[(True,True),(True,False),(False,True),(False,False)]
def __init__(self,xlog=True,ylog=True):
self._xlog=xlog
self._ylog=ylog
if xlog and ylog:
self.name='Double logarithmic'
elif xlog:
self.name='Logarithmic x'
elif ylog:
self.name='Logarithmic y'
else:
self.name='Linear'
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
d=kwargs
if self._xlog:
d['x']=np.log(x)
if dx is not None:
d['dx']=np.absolute(dx/x)
else:
d['x']=np.array(x) # make a copy!
if dx is not None:
d['dx']=np.array(dx) # make a copy!
if self._ylog:
d['y']=np.log(y)
if dy is not None:
d['dy']=np.absolute(dy/y)
else:
d['y']=np.array(y) # make a copy!
if dy is not None:
d['dy']=np.array(dy) # make a copy!
return d
class TransformPorod(Transform):
name='Porod'
_factory_arguments=[(4,),(3,)]
def __init__(self,exponent=4):
self._exponent=exponent
self.name='Porod (q^%s)'%exponent
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
d=kwargs
d['x']=np.power(x,self._exponent)
d['y']=np.power(x,self._exponent)*y
if dy is not None:
d['dy']=np.power(x,self._exponent)*dy
if dx is not None:
d['dx']=np.power(x,self._exponent-1)*(self._exponent)*dx
return d
class TransformKratky(Transform):
    name='Kratky'
    _factory_arguments=[(2,)]
    def __init__(self,exponent=2):
        self._exponent=exponent
        self.name='Kratky (Iq^%s vs. q)'%exponent
    def do_transform(self,x,y,dy=None,dx=None,**kwargs):
        d=kwargs
        # Kratky plot: the abscissa stays q, only the ordinate is scaled by q^exponent
        d['x']=np.array(x) # make a copy!
        d['y']=np.power(x,self._exponent)*y
        if dy is not None:
            d['dy']=np.power(x,self._exponent)*dy
        if dx is not None:
            d['dx']=np.array(dx) # make a copy!
        return d
class TransformShullRoess(Transform):
name='Shull-Roess'
_factory_arguments=None
def __init__(self,r0):
self._r0=r0
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
d=kwargs
d['x']=np.log(np.power(x,2)+3/self._r0**2)
d['y']=np.log(y)
if dy is not None:
d['dy']=np.absolute(dy/y)
if dx is not None:
d['dx']=2*x*dx/(np.power(x,2)+3/self._r0**2)
return d
class TransformZimm(Transform):
name='Zimm'
def __init__(self):
pass
def do_transform(self,x,y,dy=None,dx=None,**kwargs):
d=kwargs
d['x']=np.power(x,2)
d['y']=1/y
if dy is not None:
d['dy']=dy/y
if dx is not None:
d['dx']=2*np.absolute(x)*dx
return d
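# Minimal usage sketch (not part of the original module): Guinier-transform a
# synthetic curve; for ln(I) vs q^2 the slope of a linear fit is -Rg^2/3.
if __name__=='__main__':
    q=np.linspace(0.01,0.5,100)
    I=100*np.exp(-q**2*25.0/3) # ideal Guinier law with Rg = 5
    d=TransformGuinier(0)(q,I)
    slope=np.polyfit(d['x'],d['y'],1)[0]
    print('Recovered Rg: %.3f'%np.sqrt(-3*slope)) # prints ~5.000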
|
/sastool-1.0.4.tar.gz/sastool-1.0.4/retired_code/transform.py
| 0.484868 | 0.202601 |
transform.py
|
pypi
|
import numpy as np
from sastools.analyzer.enums import SAXSStandards
from sastools.analyzer.llcphase import LLCPhase
from sastools.analyzer.llcphases import (
CubicPhase,
HexagonalPhase,
IndeterminatePhase,
LamellarPhase,
)
class PrepareStandard:
"""Contains methods for preparing the calibration of measured data
using a standard.
"""
def __init__(
self, standard: SAXSStandards = None, q_std_lit: list[float] = None
) -> None:
"""Select standard for calibration of SAXS data from
`SAXSStandards` enum. If no standard is given, `q_std_lit`
values have to be provided directly or calculated from
`d_std_lit` manually using `calculate_scattering_vector()`.
Args:
standard (SAXSStandards, optional): Common SAXS standard used in experiment. Defaults to None.
q_std_lit (list[float], optional): Literature values for scattering vector of standard used in experiment. Defaults to None.
Raises:
ValueError: If both standard and q_std_lit are provided.
NotImplementedError: If not yet implemented SAXS standard is passed.
"""
self._standard = standard
self._q_std_lit = q_std_lit
if self._standard and self._q_std_lit:
raise ValueError(
f"Both a standard = '{self.standard.name}' and a custom q_std_lit = '{self.q_std_lit}' were given. These arguments are mutually exclusive, please choose only one!"
)
elif self._standard == SAXSStandards.CHOLESTERYL_PALMITATE:
self.calculate_scattering_vector()
elif self._standard is None and self._q_std_lit:
pass
elif self._standard is None and self._q_std_lit is None:
pass
else:
raise NotImplementedError(
f"SAXS Standard {standard.name} is not yet implemented."
)
def calculate_scattering_vector(
self, d_std_lit: list[float] = None
) -> list[float]:
"""Calculate scattering vector `q_std_lit` (nm^-1) for
calibration from literature lattice plane distance `d_std_lit`
(nm).
Args:
d_std_lit (list[float], optional): Lattice plane distance from literature. Defaults to None.
Raises:
ValueError: If d_std_lit is not given and neither a standard nor q_std_lit have been initialized.
ValueError: If d_std_lit list is empty.
Returns:
list[float]: Scattering vector q_std_lit.
"""
if self._q_std_lit:
print(
f"INFO: q_std_lit = {self._q_std_lit} has already been provided or calculated. Passing method call."
)
return self._q_std_lit
elif self._standard == SAXSStandards.CHOLESTERYL_PALMITATE:
d_std_lit = [5.249824535, 2.624912267, 1.749941512]
# Reference: D. L. Dorset, Journal of Lipid Research 1987,
# 28, 993-1005.
else:
if d_std_lit is None:
raise ValueError(
"d_std_lit has to be given, as neither a SAXS standard nor q_std_lit have been initialized!"
)
elif len(d_std_lit) < 1:
raise ValueError(
f"d_std_lit = {d_std_lit} cannot be an empty list!"
)
self._q_std_lit = [(2 * np.pi) / d for d in d_std_lit]
return self._q_std_lit
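    # Worked example: the first cholesteryl palmitate reflection,
    # d = 5.2498 nm, maps to q = 2*pi/d ≈ 1.1968 nm^-1, which is the
    # literature value the measured peak position is regressed against.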
def calculate_linear_regression(
self, q_std_meas: list[float]
) -> tuple[float, float]:
"""Calculate the linear regression from `q_std_meas` against
`q_std_lit` using `numpy.polyfit()` and return `slope` and
`intercept` as a tuple.
Args:
q_std_meas (list): List of measured q values for standard.
Returns:
tuple: Tuple of slope and intercept from linear regression.
"""
slope, intercept = np.polyfit(x=q_std_meas, y=self._q_std_lit, deg=1)
return (slope, intercept)
@property
def standard(self) -> SAXSStandards:
"""Get standard used for this preparation."""
return self._standard
@property
def q_std_lit(self) -> list[float]:
"""Get lscattering vectors of standard from literature used."""
return self._q_std_lit
class LLCAnalyzer:
"""Contains methods for analyzing SAXS data of LLC phases."""
def __init__(self) -> None:
self._q_corr = []
self._d_measured = []
self._d_ratio = []
def _calculate_lattice_plane_distances(self) -> None:
        # Calculate the lattice plane distances `d_measured` from the list
        # of calibrated scattering vectors `q_corr`.
self._d_measured = [(2 * np.pi) / q for q in self.q_corr]
def calibrate_data(
self, slope: float, q_meas: list[float], intercept: float
) -> None:
"""Calibrate list of measured scattering vectors `q_meas` with
`(slope, intercept)` tuple from `calculate_linear_regression()`
method of `PrepareStandard` class and return list of calibrated
scattering vectors `q_corr`.
Args:
slope (float): Slope of calculated linear regression from measured standard against literature values.
q_meas (list[float]): List of scattering vectors from raw measured data.
intercept (float): Intercept of calculated linear regression from measured standard against literature values.
"""
self._q_corr = [slope * q + intercept for q in q_meas]
def calculate_lattice_ratio(self) -> None:
"""Calculate and return the lattice plane ratios `d_ratio` from
list of lattice planes `d`.
"""
self._calculate_lattice_plane_distances()
self._d_ratio = [d / self.d_measured[0] for d in self.d_measured[1:]]
def determine_phase(self) -> LLCPhase:
"""Determine the LLC phase of the sample from `d_ratios` and
return the appropriate `LLCPhase` object.
Returns:
LLCPhase: Class holding relevant phase information of corresponding LLC phase.
"""
H1 = [
(1 / np.sqrt(3)),
(1 / np.sqrt(4)),
(1 / np.sqrt(7)),
(1 / np.sqrt(9)),
]
V1 = [
(1 / np.sqrt(2)),
(1 / np.sqrt(3)),
(1 / np.sqrt(4)),
(1 / np.sqrt(5)),
]
La = [(1 / 2), (1 / 3), (1 / 4), (1 / 5)]
        # Every measured ratio must match the corresponding literature ratio
        # within an absolute tolerance of 0.03 for a phase to be assigned.
        if not self.d_ratio:
            return IndeterminatePhase()
        if all(abs(r - h) < 0.03 for r, h in zip(self.d_ratio, H1)):
            return HexagonalPhase()
        if all(abs(r - v) < 0.03 for r, v in zip(self.d_ratio, V1)):
            return CubicPhase()
        if all(abs(r - la) < 0.03 for r, la in zip(self.d_ratio, La)):
            return LamellarPhase()
        return IndeterminatePhase()
@property
def q_corr(self) -> list[float]:
"""Get calibrated scattering vectors."""
return self._q_corr
@property
def d_measured(self) -> list[float]:
"""Get lattice plane distances."""
return self._d_measured
@property
def d_ratio(self) -> list[float]:
"""Get lattice plane ratios."""
return self._d_ratio
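# Minimal end-to-end sketch (hypothetical peak positions, not package test
# data): calibrate measured q values against the cholesteryl palmitate
# standard, then classify the phase from the resulting d ratios.
if __name__ == "__main__":
    std = PrepareStandard(standard=SAXSStandards.CHOLESTERYL_PALMITATE)
    slope, intercept = std.calculate_linear_regression(
        q_std_meas=[1.20, 2.40, 3.60]  # hypothetical measured standard peaks
    )
    analyzer = LLCAnalyzer()
    # peaks at ratios 1 : sqrt(3) : 2 : sqrt(7), typical of a hexagonal phase
    analyzer.calibrate_data(slope, [1.555, 2.693, 3.110, 4.114], intercept)
    analyzer.calculate_lattice_ratio()
    print(analyzer.determine_phase())  # prints "Hexagonal LLC Phase"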
|
/analyzer/saxsanalyzers.py
| 0.944638 | 0.508544 |
saxsanalyzers.py
|
pypi
|
import numpy as np
from sastools.analyzer.enums import LLCPhases, LLCSpaceGroups, LLCMillerIndices
from sastools.analyzer.llcphase import LLCPhase
class HexagonalPhase(LLCPhase):
"""Container for properties of hexagonal LLC phases."""
def __init__(self) -> None:
self._exact_phase = None
self._space_group = LLCSpaceGroups.P6MM
self._miller_indices = ()
self._lattice_parameters = []
self._phase_information = {}
def __repr__(self) -> str:
return "Hexagonal LLC Phase"
def _calculate_a_H1(self, d: float, h: int, k: int) -> float:
# Calculate and return the lattice parameter for a given lattice
# plane distance d, miller index h, and miller index k.
a_H1 = d * np.sqrt((4 / 3) * ((h**2 + k**2 + (h * k))))
return a_H1
def calculate_lattice_parameters(
self, d_meas: list[float], phase: LLCPhases = LLCPhases.H1
) -> None:
"""Calculate lattice parameters of hexagonal phase using a list
of measured lattice plane distances `d_meas`.
Args:
d_meas (list[float]): Measured lattice plane distances.
phase (LLCPhases, optional): The hexagonal phase of the system. Defaults to LLCPhases.H1.
Raises:
NotImplementedError: If phase provided is not yet implemented.
"""
if not phase == LLCPhases.H1:
raise NotImplementedError(
f"Chosen LLC phase '{phase}' is not (yet) supported."
)
self._exact_phase = phase
        for i, d in enumerate(d_meas):
            a_i = self._calculate_a_H1(
                d, self.miller_indices[0][i], self.miller_indices[1][i]
            )
self.lattice_parameters.append(a_i)
@property
def exact_phase(self) -> LLCPhases:
"""Get hexagonal phase."""
return self._exact_phase
@exact_phase.setter
def exact_phase(self, phase: LLCPhases) -> None:
self._exact_phase = phase
@property
def space_group(self) -> LLCSpaceGroups:
"""Get space group of hexagonal phase."""
return self._space_group
@property
def miller_indices(self) -> tuple[list[int], list[int], list[int]]:
"""Get miller indices of hexagonal phase."""
self._miller_indices = LLCMillerIndices[self._space_group.name].value
return self._miller_indices
@property
def lattice_parameters(self) -> list[float]:
"""Get lattice parameters of hexagonal phase."""
return self._lattice_parameters
@property
def phase_information(self) -> dict:
"""Get full phase information of hexagonal phase."""
self._phase_information = dict(
phase=self.exact_phase.value,
lattice_parameter=np.mean(self.lattice_parameters),
)
return self._phase_information
class CubicPhase(LLCPhase):
"""Container for properties of cubic LLC phases."""
def __init__(self) -> None:
self._exact_phase = None
self._space_group = None
self._miller_indices = ()
self._lattice_parameters = []
self._phase_information = {}
self._d_reciprocal = []
self._sqrt_miller = []
def __repr__(self) -> str:
return f"Cubic LLC Phase"
def _calculate_a_V1(self, d: float, h: int, k: int, l: int) -> float:
# Calculate and return the lattice parameter for a given lattice
# plane distance d, miller index h, k, and l.
a_V1 = d * (np.sqrt((h**2) + (k**2) + (l**2)))
return a_V1
def calculate_lattice_parameters(
self,
d_meas: list[float],
phase: LLCPhases = LLCPhases.V1,
space_group: LLCSpaceGroups = LLCSpaceGroups.IA3D,
) -> None:
"""Calculate lattice parameters of cubic phase using a list of
measured lattice plane distances `d_meas`.
Args:
d_meas (list[float]): Measured lattice plane distances.
phase (LLCPhases, optional): The cubic phase of the system. Defaults to LLCPhases.V1.
space_group (LLCSpaceGroups, optional): The space group corresponding to the cubic phase. Defaults to LLCSpaceGroups.IA3D.
Raises:
NotImplementedError: If phase provided is not yet implemented.
"""
if not phase == LLCPhases.V1:
raise NotImplementedError(
f"Chosen LLC phase '{phase}' is not (yet) supported."
)
self._exact_phase = phase
self._space_group = space_group
        for i, d in enumerate(d_meas):
            a_i = self._calculate_a_V1(
                d,
                self.miller_indices[0][i],
                self.miller_indices[1][i],
                self.miller_indices[2][i],
            )
self._lattice_parameters.append(a_i)
def calculate_d_reciprocal(self, peak_center: list[float]) -> None:
"""Calculate the reciprocal lattice plane distances
`d_reciprocal` from the `peak_centers` determined through
lorentzian fitting.
Args:
peak_center (list[float]): Peak centers determined by lorentzian fitting for the cubic phase.
"""
self._d_reciprocal = [peak / (2 * np.pi) for peak in peak_center]
def calculate_sqrt_miller(self) -> None:
"""Calculate the square roots `sq_root` of the `miller_indices`
corresponding to the peaks of the cubic phase.
"""
self._sqrt_miller = [
np.sqrt(
self.miller_indices[0][i] ** 2
+ self.miller_indices[1][i] ** 2
+ self.miller_indices[2][i] ** 2
)
for i in range(len(self.d_reciprocal))
]
@property
def exact_phase(self) -> LLCPhases:
"""Get cubic phase."""
return self._exact_phase
@exact_phase.setter
def exact_phase(self, phase: LLCPhases) -> None:
self._exact_phase = phase
@property
def space_group(self) -> LLCSpaceGroups:
"""Get space group of cubic phase."""
return self._space_group
@space_group.setter
def space_group(self, space_group: LLCSpaceGroups) -> None:
self._space_group = space_group
@property
def miller_indices(self) -> tuple[list[int], list[int], list[int]]:
"""Get miller indices of cubic phase."""
if self.space_group is None:
raise ValueError("space_group property has to be provided first.")
self._miller_indices = LLCMillerIndices[self._space_group.name].value
return self._miller_indices
@property
def lattice_parameters(self) -> list[float]:
"""Get lattice parameters of cubic phase."""
return self._lattice_parameters
@property
def phase_information(self) -> dict:
"""Get full phase information of cubic phase."""
self._phase_information = dict(
phase=self.exact_phase.value,
lattice_parameter=np.mean(self.lattice_parameters),
)
return self._phase_information
@property
def d_reciprocal(self) -> list[float]:
"""Get reciprocal lattice plane distances of cubic phase."""
return self._d_reciprocal
@property
def sqrt_miller(self) -> list[int]:
"""Get square roots of miller indices of cubic phase."""
return self._sqrt_miller
class LamellarPhase(LLCPhase):
"""Container for properties of lamellar LLC phases."""
def __init__(self) -> None:
self._exact_phase = None
self._space_group = None
self._miller_indices = None
self._lattice_parameters = []
self._phase_information = {}
def __repr__(self) -> str:
return "Lamellar LLC Phase"
def calculate_lattice_parameters(
self, d_meas: list[float], phase: LLCPhases = LLCPhases.LA
) -> None:
"""Calculate lattice parameters of lamellar phase using a list
of measured lattice plane distances `d_meas`.
Args:
d_meas (list[float]): Measured lattice plane distances.
phase (LLCPhases, optional): The lamellar phase of the system. Defaults to LLCPhases.LA.
Raises:
NotImplementedError: If phase provided is not yet implemented.
"""
if not phase == LLCPhases.LA:
raise NotImplementedError(
f"Chosen LLC phase '{phase}' is not (yet) supported."
)
self._exact_phase = phase
self._lattice_parameters.append(d_meas[0])
@property
def exact_phase(self) -> LLCPhases:
"""Get lamellar phase."""
return self._exact_phase
@exact_phase.setter
def exact_phase(self, phase: LLCPhases) -> None:
self._exact_phase = phase
@property
def space_group(self) -> None:
"""Get space group of lamellar phase."""
return self._space_group
@property
def miller_indices(self) -> None:
"""Get miller indices of lamellar phase."""
return self._miller_indices
@property
def lattice_parameters(self) -> list[float]:
"""Get lattice parameters of lamellar phase."""
return self._lattice_parameters
@property
def phase_information(self) -> dict:
"""Get full phase information of lamellar phase."""
self._phase_information = dict(
phase=self.exact_phase.value,
lattice_parameter=self.lattice_parameters[0],
)
return self._phase_information
class IndeterminatePhase(LLCPhase):
"""Container for properties of indeterminate LLC phases."""
def __init__(self) -> None:
self._exact_phase = LLCPhases.INDETERMINATE
self._space_group = None
self._miller_indices = None
self._lattice_parameters = None
self._phase_information = {}
def __repr__(self) -> str:
return "Indeterminate LLC Phase"
def calculate_lattice_parameters(
self, d_meas: list[float], phase: LLCPhases = LLCPhases.INDETERMINATE
) -> None:
"""Do not use this method! Indeterminate phases have no lattice
parameters.
Args:
d_meas (list[float]): Measured lattice plane distances.
phase (LLCPhases, optional): Indeterminate phase. Defaults to LLCPhases.INDETERMINATE.
Raises:
NotImplementedError: If this method is called.
"""
        raise NotImplementedError(
            "No lattice parameter in indeterminate phases!"
        )
@property
def exact_phase(self) -> LLCPhases:
"""Get indeterminate phase."""
return self._exact_phase
@property
def space_group(self) -> None:
"""Get space group of indeterminate phase."""
return self._space_group
@property
def miller_indices(self) -> None:
"""Get miller indices of indeterminate phase."""
return self._miller_indices
@property
def lattice_parameters(self) -> None:
"""Get lattice parameters of indeterminate phase."""
return self._lattice_parameters
@property
def phase_information(self) -> dict:
"""Get full phase information of indeterminate phase."""
self._phase_information = dict(
phase=self.exact_phase.value,
lattice_parameter="-",
)
return self._phase_information
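# Minimal usage sketch (hypothetical d values in nm, not package test data):
if __name__ == "__main__":
    phase = HexagonalPhase()
    # d spacings with ratios 1 : 1/sqrt(3) : 1/2, consistent with an H1 phase
    phase.calculate_lattice_parameters([5.00, 2.887, 2.50])
    print(phase.phase_information)  # mean lattice parameter ≈ 5.77 nm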
|
/analyzer/llcphases.py
| 0.944574 | 0.49823 |
llcphases.py
|
pypi
|
import os
import unittest
import sys
from PyQt5 import QtGui
from PyQt5 import QtWidgets
"""
Unit tests for the QT GUI
=========================
In order to run the tests, first install SasView and sasmodels to site-packages
by running ``python setup.py install`` in both repositories.
The tests can be run with ``python GUITests.py``, or
``python GUITests.py suiteName1 suiteName2 ...`` for a subset of tests.
To get more verbose console output (recommended), use ``python GUITests.py -v``
"""
# List of all suite names. Every time a new suite is added, its name should
# also be added here
ALL_SUITES = [
'calculatorsSuite',
'mainSuite',
'fittingSuite',
'plottingSuite',
'utilitiesSuite',
'corfuncPerspectiveSuite',
'invariantPerspectiveSuite',
'inversionPerspectiveSuite',
]
# Prepare the general QApplication instance
app = QtWidgets.QApplication(sys.argv)
# Main Window
from MainWindow.UnitTesting import AboutBoxTest
from MainWindow.UnitTesting import DataExplorerTest
from MainWindow.UnitTesting import WelcomePanelTest
from MainWindow.UnitTesting import DroppableDataLoadWidgetTest
from MainWindow.UnitTesting import GuiManagerTest
from MainWindow.UnitTesting import MainWindowTest
## Plotting
from Plotting.UnitTesting import AddTextTest
from Plotting.UnitTesting import PlotHelperTest
from Plotting.UnitTesting import WindowTitleTest
from Plotting.UnitTesting import ScalePropertiesTest
from Plotting.UnitTesting import SetGraphRangeTest
from Plotting.UnitTesting import LinearFitTest
from Plotting.UnitTesting import PlotPropertiesTest
from Plotting.UnitTesting import PlotUtilitiesTest
from Plotting.UnitTesting import ColorMapTest
from Plotting.UnitTesting import BoxSumTest
from Plotting.UnitTesting import SlicerModelTest
from Plotting.UnitTesting import SlicerParametersTest
from Plotting.UnitTesting import PlotterBaseTest
from Plotting.UnitTesting import PlotterTest
from Plotting.UnitTesting import Plotter2DTest
from Plotting.UnitTesting import QRangeSliderTests
# Calculators
from Calculators.UnitTesting import KiessigCalculatorTest
from Calculators.UnitTesting import DensityCalculatorTest
from Calculators.UnitTesting import GenericScatteringCalculatorTest
from Calculators.UnitTesting import SLDCalculatorTest
from Calculators.UnitTesting import SlitSizeCalculatorTest
from Calculators.UnitTesting import ResolutionCalculatorPanelTest
from Calculators.UnitTesting import DataOperationUtilityTest
# Utilities
from Utilities.UnitTesting import GuiUtilsTest
from Utilities.UnitTesting import SasviewLoggerTest
from Utilities.UnitTesting import GridPanelTest
from Utilities.UnitTesting import ModelEditorTest
from Utilities.UnitTesting import PluginDefinitionTest
from Utilities.UnitTesting import TabbedModelEditorTest
from Utilities.UnitTesting import AddMultEditorTest
from Utilities.UnitTesting import ReportDialogTest
from Utilities.UnitTesting import FileConverterTest
# Unit Testing
from UnitTesting import TestUtilsTest
# Perspectives
# Fitting
from Perspectives.Fitting.UnitTesting import FittingWidgetTest
from Perspectives.Fitting.UnitTesting import FittingPerspectiveTest
from Perspectives.Fitting.UnitTesting import FittingLogicTest
from Perspectives.Fitting.UnitTesting import FittingUtilitiesTest
from Perspectives.Fitting.UnitTesting import FitPageTest
from Perspectives.Fitting.UnitTesting import FittingOptionsTest
from Perspectives.Fitting.UnitTesting import MultiConstraintTest
from Perspectives.Fitting.UnitTesting import ComplexConstraintTest
from Perspectives.Fitting.UnitTesting import ConstraintWidgetTest
# Invariant
from Perspectives.Invariant.UnitTesting import InvariantPerspectiveTest
from Perspectives.Invariant.UnitTesting import InvariantDetailsTest
# Inversion
from Perspectives.Inversion.UnitTesting import InversionPerspectiveTest
# Corfunc
from Perspectives.Corfunc.UnitTesting import CorfuncTest
def plottingSuite():
suites = (
# Plotting
unittest.makeSuite(Plotter2DTest.Plotter2DTest, 'test'),
unittest.makeSuite(PlotHelperTest.PlotHelperTest, 'test'),
unittest.makeSuite(AddTextTest.AddTextTest, 'test'),
unittest.makeSuite(WindowTitleTest.WindowTitleTest, 'test'),
unittest.makeSuite(ScalePropertiesTest.ScalePropertiesTest, 'test'),
unittest.makeSuite(SetGraphRangeTest.SetGraphRangeTest, 'test'),
unittest.makeSuite(LinearFitTest.LinearFitTest, 'test'),
unittest.makeSuite(PlotPropertiesTest.PlotPropertiesTest, 'test'),
unittest.makeSuite(PlotUtilitiesTest.PlotUtilitiesTest, 'test'),
unittest.makeSuite(ColorMapTest.ColorMapTest, 'test'),
unittest.makeSuite(BoxSumTest.BoxSumTest, 'test'),
unittest.makeSuite(SlicerModelTest.SlicerModelTest, 'test'),
unittest.makeSuite(SlicerParametersTest.SlicerParametersTest, 'test'),
unittest.makeSuite(PlotterBaseTest.PlotterBaseTest, 'test'),
unittest.makeSuite(PlotterTest.PlotterTest, 'test'),
unittest.makeSuite(QRangeSliderTests.QRangeSlidersTest, 'test'),
)
return unittest.TestSuite(suites)
def mainSuite():
suites = (
# Main window
unittest.makeSuite(DataExplorerTest.DataExplorerTest, 'test'),
unittest.makeSuite(DroppableDataLoadWidgetTest.DroppableDataLoadWidgetTest, 'test'),
unittest.makeSuite(MainWindowTest.MainWindowTest, 'test'),
unittest.makeSuite(GuiManagerTest.GuiManagerTest, 'test'),
unittest.makeSuite(AboutBoxTest.AboutBoxTest, 'test'),
unittest.makeSuite(WelcomePanelTest.WelcomePanelTest, 'test'),
)
return unittest.TestSuite(suites)
def utilitiesSuite():
suites = (
## Utilities
unittest.makeSuite(TestUtilsTest.TestUtilsTest, 'test'),
unittest.makeSuite(SasviewLoggerTest.SasviewLoggerTest, 'test'),
unittest.makeSuite(GuiUtilsTest.GuiUtilsTest, 'test'),
unittest.makeSuite(GuiUtilsTest.DoubleValidatorTest, 'test'),
unittest.makeSuite(GuiUtilsTest.HashableStandardItemTest, 'test'),
unittest.makeSuite(GridPanelTest.BatchOutputPanelTest, 'test'),
unittest.makeSuite(ModelEditorTest.ModelEditorTest, 'test'),
unittest.makeSuite(PluginDefinitionTest.PluginDefinitionTest, 'test'),
unittest.makeSuite(TabbedModelEditorTest.TabbedModelEditorTest,'test'),
unittest.makeSuite(AddMultEditorTest.AddMultEditorTest, 'test'),
unittest.makeSuite(ReportDialogTest.ReportDialogTest, 'test'),
unittest.makeSuite(FileConverterTest.FileConverterTest, 'test'),
)
return unittest.TestSuite(suites)
def calculatorsSuite():
suites = (
# Calculators
unittest.makeSuite(KiessigCalculatorTest.KiessigCalculatorTest, 'test'),
unittest.makeSuite(DensityCalculatorTest.DensityCalculatorTest, 'test'),
unittest.makeSuite(GenericScatteringCalculatorTest.GenericScatteringCalculatorTest, 'test'),
unittest.makeSuite(SLDCalculatorTest.SLDCalculatorTest, 'test'),
unittest.makeSuite(SlitSizeCalculatorTest.SlitSizeCalculatorTest, 'test'),
unittest.makeSuite(ResolutionCalculatorPanelTest.ResolutionCalculatorPanelTest, 'test'),
unittest.makeSuite(DataOperationUtilityTest.DataOperationUtilityTest, 'test'),
)
return unittest.TestSuite(suites)
def fittingSuite():
suites = (
# Perspectives
# Fitting
unittest.makeSuite(FittingPerspectiveTest.FittingPerspectiveTest, 'test'),
unittest.makeSuite(FittingWidgetTest.FittingWidgetTest, 'test'),
unittest.makeSuite(FittingLogicTest.FittingLogicTest, 'test'),
unittest.makeSuite(FittingUtilitiesTest.FittingUtilitiesTest, 'test'),
unittest.makeSuite(FitPageTest.FitPageTest, 'test'),
unittest.makeSuite(FittingOptionsTest.FittingOptionsTest, 'test'),
unittest.makeSuite(MultiConstraintTest.MultiConstraintTest, 'test'),
unittest.makeSuite(ConstraintWidgetTest.ConstraintWidgetTest, 'test'),
unittest.makeSuite(ComplexConstraintTest.ComplexConstraintTest, 'test'),
)
return unittest.TestSuite(suites)
def perspectivesSuite():
suites = (
# Invariant
unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),
unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),
# Inversion
unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),
# Corfunc
unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),
)
return unittest.TestSuite(suites)
def invariantPerspectiveSuite():
suites = (
# Invariant only
unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),
unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),
)
return unittest.TestSuite(suites)
def corfuncPerspectiveSuite():
suites = (
# Corfunc only
unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),
)
return unittest.TestSuite(suites)
def inversionPerspectiveSuite():
suites = (
# Inversion only
unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),
)
return unittest.TestSuite(suites)
if __name__ == "__main__":
user_suites = ALL_SUITES
# Check if user asked for specific suites:
if len(sys.argv) > 1:
user_suites = sys.argv[1:]
errors = {}
for suite in user_suites:
# create the suite object from name
try:
suite_instance = globals()[suite]()
            result = unittest.TextTestResult(sys.stdout, True, True)
            print("\nRunning %d test cases for %s" % (suite_instance.countTestCases(), suite))
            result.buffer = True
suite_instance.run(result)
if not result.wasSuccessful():
if len(result.errors) or len(result.failures):
errors[suite] = (result.errors, result.failures)
                if len(result.errors):
                    print("\n============ Errors discovered ===================")
                if len(result.failures):
                    print("\n============ Failures discovered =================")
else:
print("\nAll tests successful")
        except KeyError as ex:
            print("Failure: %s" % str(ex))
            print("ERROR: Incorrect suite name: %s" % suite)
    if len(errors) > 0:
        for suite, (suite_errors, suite_failures) in errors.items():
            for r in suite_errors:
                print("\nSuite: %s had the following errors:\n %s : %s" % (suite, r[0], r[1]))
            for r in suite_failures:
                print("\nSuite: %s had the following failures:\n %s : %s" % (suite, r[0], r[1]))
print("=================================================")
print("Exiting with error")
os._exit(1)
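# Usage sketch (the suite names are the function names defined above; this
# invocation pattern follows the argv handling in __main__ but is otherwise
# an assumption, not a documented CLI):
#   python GUITests.py utilitiesSuite fittingSuite
# With no arguments, every suite listed in ALL_SUITES is run.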
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/GUITests.py
| 0.466359 | 0.505981 |
GUITests.py
|
pypi
|
import os
import logging
import numpy as np
import matplotlib
import matplotlib.image as mpimg
from PyQt5 import QtWidgets
from sas.sascalc.dataloader.manipulations import reader2D_converter
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Plotting.Plotter2D import Plotter2D
from sas.qtgui.Plotting.PlotterData import Data2D
from sas.sascalc.dataloader.data_info import Detector
# Local UI
from sas.qtgui.Utilities.UI.ImageViewerUI import Ui_ImageViewerUI
from sas.qtgui.Utilities.UI.ImageViewerOptionsUI import Ui_ImageViewerOptionsUI
matplotlib.interactive(False)
class ImageViewer(QtWidgets.QMainWindow, Ui_ImageViewerUI):
"""
Implemented as QMainWindow to enable easy menus
"""
def __init__(self, parent=None):
super(ImageViewer, self).__init__(parent._parent)
self.parent = parent
self.setupUi(self)
# Other globals
self.plotter = None
self.hbox = None
self.filename = None
self.is_png = False
self.image = None
# disable menu items on empty canvas
self.disableMenus()
# set up menu item triggers
self.addTriggers()
def disableMenus(self):
"""
All menu items but "Load File" and "Help" should be disabled
when no data is present
"""
self.actionSave_Image.setEnabled(False)
self.actionPrint_Image.setEnabled(False)
self.actionCopy_Image.setEnabled(False)
self.actionConvert_to_Data.setEnabled(False)
def enableMenus(self):
"""
Enable all menu items when data is present
"""
self.actionSave_Image.setEnabled(True)
self.actionPrint_Image.setEnabled(True)
self.actionCopy_Image.setEnabled(True)
self.actionConvert_to_Data.setEnabled(True)
def addTriggers(self):
"""
Trigger definitions for all menu/toolbar actions.
"""
# File
self.actionLoad_Image.triggered.connect(self.actionLoadImage)
self.actionSave_Image.triggered.connect(self.actionSaveImage)
self.actionPrint_Image.triggered.connect(self.actionPrintImage)
# Edit
self.actionCopy_Image.triggered.connect(self.actionCopyImage)
# Image
self.actionConvert_to_Data.triggered.connect(self.actionConvertToData)
# Help
self.actionHow_To.triggered.connect(self.actionHowTo)
def actionLoadImage(self):
"""
        Image loader for the given file extensions
"""
wildcards = "Images (*.bmp *.gif *jpeg *jpg *.png *tif *.tiff) ;;"\
"Bitmap (*.bmp *.BMP);; "\
"GIF (*.gif *.GIF);; "\
"JPEG (*.jpg *.jpeg *.JPG *.JPEG);; "\
"PNG (*.png *.PNG);; "\
"TIFF (*.tif *.tiff *.TIF *.TIFF);; "\
"All files (*.*)"
filepath = QtWidgets.QFileDialog.getOpenFileName(
self, "Choose a file", "", wildcards)[0]
if filepath:
self.showImage(filepath)
def actionSaveImage(self):
"""
Use the internal MPL method for saving to file
"""
if self.plotter is not None:
self.plotter.onImageSave()
def actionPrintImage(self):
"""
Display printer dialog and print the MPL widget area
"""
if self.plotter is not None:
self.plotter.onImagePrint()
def actionCopyImage(self):
"""
Copy MPL widget area to buffer
"""
if self.plotter is not None:
self.plotter.onClipboardCopy()
def actionConvertToData(self):
"""
Show the options dialog and if accepted, send data to conversion
"""
options = ImageViewerOptions(self)
if options.exec_() != QtWidgets.QDialog.Accepted:
return
(xmin, xmax, ymin, ymax, zscale) = options.getState()
image = self.image
try:
self.convertImage(image, xmin, xmax, ymin, ymax, zscale)
except Exception as ex:
err_msg = "Error occurred while converting Image to Data: " + str(ex)
logging.error(err_msg)
def actionHowTo(self):
''' Send the image viewer help URL to the help viewer '''
location = "/user/qtgui/Calculators/image_viewer_help.html"
self.parent.showHelp(location)
def addPlotter(self):
"""
Add a new plotter to the frame
"""
self.plotter = Plotter2D(self, quickplot=True)
# remove existing layout
if self.hbox is not None:
for i in range(self.hbox.count()):
layout_item = self.hbox.itemAt(i)
self.hbox.removeItem(layout_item)
self.hbox.addWidget(self.plotter)
else:
# add the plotter to the QLayout
self.hbox = QtWidgets.QHBoxLayout()
self.hbox.addWidget(self.plotter)
self.imgFrame.setLayout(self.hbox)
def showImage(self, filename):
"""
Show the requested image in the main frame
"""
self.filename = os.path.basename(filename)
_, extension = os.path.splitext(self.filename)
try:
# Note that matplotlib only reads png natively.
# Any other formats (tiff, jpeg, etc) are passed
# to PIL which seems to have a problem in version
# 1.1.7 that causes a close error which shows up in
# the log file. This does not seem to have any adverse
# effects. PDB --- September 17, 2017.
self.image = mpimg.imread(filename)
self.is_png = extension.lower() == '.png'
self.addPlotter()
ax = self.plotter.ax
flipped_image = np.flipud(self.image)
origin = None
if self.is_png:
origin = 'lower'
self.plotter.imageShow(flipped_image, origin=origin)
if not self.is_png:
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlabel('x [pixel]')
ax.set_ylabel('y [pixel]')
self.plotter.figure.subplots_adjust(left=0.15, bottom=0.1,
right=0.95, top=0.95)
title = 'Picture: ' + self.filename
self.setWindowTitle(title)
self.plotter.draw()
except IOError as ex:
err_msg = "Failed to load '%s'.\n" % self.filename
logging.error(err_msg+str(ex))
return
except Exception as ex:
err_msg = "Failed to show '%s'.\n" % self.filename
logging.error(err_msg+str(ex))
return
# Loading successful - enable menu items
self.enableMenus()
def convertImage(self, rgb, xmin, xmax, ymin, ymax, zscale):
"""
Convert image to data2D
"""
x_len = len(rgb[0])
y_len = len(rgb)
x_vals = np.linspace(xmin, xmax, num=x_len)
y_vals = np.linspace(ymin, ymax, num=y_len)
# Instantiate data object
output = Data2D()
output.filename = self.filename
output.name, _ = os.path.splitext(self.filename)
output.id = output.filename
detector = Detector()
detector.pixel_size.x = None
detector.pixel_size.y = None
# Store the sample to detector distance
detector.distance = None
output.detector.append(detector)
        # Initialize the output data object
output.data = zscale * self.rgb2gray(rgb)
output.err_data = np.zeros([x_len, y_len])
output.mask = np.ones([x_len, y_len], dtype=bool)
output.xbins = x_len
output.ybins = y_len
output.x_bins = x_vals
output.y_bins = y_vals
output.qx_data = np.array(x_vals)
output.qy_data = np.array(y_vals)
output.xmin = xmin
output.xmax = xmax
output.ymin = ymin
output.ymax = ymax
output.xaxis('\\rm{Q_{x}}', r'\AA^{-1}')
output.yaxis('\\rm{Q_{y}}', r'\AA^{-1}')
# Store loading process information
output.meta_data['loader'] = self.filename.split('.')[-1] + "Reader"
output.is_data = True
try:
output = reader2D_converter(output)
except Exception as ex:
err_msg = "Image conversion failed: '%s'.\n" % str(ex)
logging.error(err_msg)
# Create item and add to the data explorer
try:
item = GuiUtils.createModelItemWithPlot(output, output.filename)
self.parent.communicate.updateModelFromPerspectiveSignal.emit(item)
except Exception as ex:
err_msg = "Failed to create new index '%s'.\n" % str(ex)
logging.error(err_msg)
def rgb2gray(self, rgb):
"""
RGB to Grey
"""
if self.is_png:
# png image limits: 0 to 1, others 0 to 255
#factor = 255.0
rgb = rgb[::-1]
if rgb.ndim == 2:
grey = np.rollaxis(rgb, axis=0)
else:
red, green, blue = np.rollaxis(rgb[..., :3], axis=-1)
grey = 0.299 * red + 0.587 * green + 0.114 * blue
max_i = rgb.max()
factor = 255.0/max_i
grey *= factor
return np.array(grey)
class ImageViewerOptions(QtWidgets.QDialog, Ui_ImageViewerOptionsUI):
"""
Logics for the image viewer options UI
"""
def __init__(self, parent=None):
super(ImageViewerOptions, self).__init__(parent)
self.parent = parent
self.setupUi(self)
# fill in defaults
self.addDefaults()
# add validators
self.addValidators()
def addDefaults(self):
"""
Fill out textedits with default values
"""
zscale_default = 1.0
xmin_default = -0.3
xmax_default = 0.3
ymin_default = -0.3
ymax_default = 0.3
self.txtZmax.setText(str(zscale_default))
self.txtXmin.setText(str(xmin_default))
self.txtXmax.setText(str(xmax_default))
self.txtYmin.setText(str(ymin_default))
self.txtYmax.setText(str(ymax_default))
def addValidators(self):
"""
Define simple validators on line edits
"""
self.txtXmin.setValidator(GuiUtils.DoubleValidator())
self.txtXmax.setValidator(GuiUtils.DoubleValidator())
self.txtYmin.setValidator(GuiUtils.DoubleValidator())
self.txtYmax.setValidator(GuiUtils.DoubleValidator())
zvalidator = GuiUtils.DoubleValidator()
zvalidator.setBottom(0.0)
zvalidator.setTop(255.0)
self.txtZmax.setValidator(zvalidator)
def getState(self):
"""
return current state of the widget
"""
zscale = float(self.txtZmax.text())
xmin = float(self.txtXmin.text())
xmax = float(self.txtXmax.text())
ymin = float(self.txtYmin.text())
ymax = float(self.txtYmax.text())
return (xmin, xmax, ymin, ymax, zscale)
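if __name__ == "__main__":
    # Minimal self-check sketch (an assumed entry point for illustration, not
    # shipped behaviour): verify the ITU-R BT.601 luma weights used by
    # ImageViewer.rgb2gray on a synthetic all-white image, without any Qt setup.
    _rgb = np.ones((4, 4, 3))
    _red, _green, _blue = np.rollaxis(_rgb[..., :3], axis=-1)
    _grey = 0.299 * _red + 0.587 * _green + 0.114 * _blue
    assert np.allclose(_grey, 1.0)  # the weights sum to 1.0, so white stays white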
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Utilities/ImageViewer.py
| 0.452294 | 0.18385 |
ImageViewer.py
|
pypi
|
from PyQt5.QtCore import Qt, QRect, QSize
from PyQt5.QtWidgets import QWidget, QPlainTextEdit, QTextEdit
from PyQt5.QtGui import QColor, QPainter, QTextFormat
class QLineNumberArea(QWidget):
def __init__(self, editor):
super().__init__(editor)
self.codeEditor = editor
def sizeHint(self):
        return QSize(self.codeEditor.lineNumberAreaWidth(), 0)
def paintEvent(self, event):
self.codeEditor.lineNumberAreaPaintEvent(event)
class QCodeEditor(QPlainTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
self.lineNumberArea = QLineNumberArea(self)
self.setLineWrapMode(QPlainTextEdit.NoWrap)
self.blockCountChanged.connect(self.updateLineNumberAreaWidth)
self.updateRequest.connect(self.updateLineNumberArea)
self.cursorPositionChanged.connect(self.highlightCurrentLine)
self.updateLineNumberAreaWidth()
def lineNumberAreaWidth(self):
digits = 1
max_value = max(1, self.blockCount())
        while max_value >= 10:
            max_value //= 10
            digits += 1
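        # Example: blockCount() == 1500 walks 1500 -> 150 -> 15 -> 1 above,
        # leaving digits == 4, enough room to paint the label "1500".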
# line number display width padded with extra pixels.
# Chosen to "look nice", hence magic numbers
space = 3 + self.fontMetrics().width('9') * digits
return space
def updateLineNumberAreaWidth(self):
self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0)
def updateLineNumberArea(self, rect, dy):
if dy:
self.lineNumberArea.scroll(0, dy)
else:
self.lineNumberArea.update(0, rect.y(), self.lineNumberArea.width(), rect.height())
if rect.contains(self.viewport().rect()):
self.updateLineNumberAreaWidth()
def resizeEvent(self, event):
super().resizeEvent(event)
cr = self.contentsRect()
self.lineNumberArea.setGeometry(QRect(cr.left(), cr.top(), self.lineNumberAreaWidth(), cr.height()))
def highlightCurrentLine(self):
extraSelections = []
if not self.isReadOnly():
selection = QTextEdit.ExtraSelection()
lineColor = QColor(Qt.yellow).lighter(160)
selection.format.setBackground(lineColor)
selection.format.setProperty(QTextFormat.FullWidthSelection, True)
selection.cursor = self.textCursor()
selection.cursor.clearSelection()
extraSelections.append(selection)
self.setExtraSelections(extraSelections)
def lineNumberAreaPaintEvent(self, event):
painter = QPainter(self.lineNumberArea)
painter.fillRect(event.rect(), Qt.lightGray)
block = self.firstVisibleBlock()
blockNumber = block.blockNumber()
top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()
bottom = top + self.blockBoundingRect(block).height()
# Just to make sure I use the right font
height = self.fontMetrics().height()
while block.isValid() and (top <= event.rect().bottom()):
if block.isVisible() and (bottom >= event.rect().top()):
number = str(blockNumber + 1)
painter.setPen(Qt.black)
painter.drawText(0, top, self.lineNumberArea.width(), height, Qt.AlignRight, number)
block = block.next()
top = bottom
bottom = top + self.blockBoundingRect(block).height()
blockNumber += 1
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
codeEditor = QCodeEditor()
codeEditor.show()
sys.exit(app.exec_())
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Utilities/CodeEditor.py
| 0.579757 | 0.236869 |
CodeEditor.py
|
pypi
|
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
def format(color, style=''):
"""Return a QTextCharFormat with the given attributes.
"""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format
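# Example: format('blue') yields a blue-foreground QTextCharFormat, while
# format('black', 'bold') additionally sets QFont.Bold; the STYLES table
# below is built entirely from such calls.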
# Syntax styles that can be shared by all languages
STYLES = {
'keyword': format('blue'),
'operator': format('red'),
'brace': format('darkGray'),
'defclass': format('black', 'bold'),
'string': format('magenta'),
'string2': format('darkMagenta'),
'comment': format('darkGreen', 'italic'),
'self': format('black', 'italic'),
'numbers': format('brown'),
}
class PythonHighlighter (QSyntaxHighlighter):
"""Syntax highlighter for the Python language.
"""
# Python keywords
python_keywords = [
'and', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally',
'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield',
'None', 'True', 'False',
]
# C keywords
c_keywords = [
'auto', 'break', 'case', 'char',
'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern',
'float', 'for', 'goto', 'if',
'int', 'long', 'register', 'return',
'short', 'signed', 'sizeof', 'static',
'struct', 'switch', 'typedef', 'union',
'unsigned', 'void', 'volatile', 'while'
]
# Python operators
    operators = [
        '=',
        # Comparison
        '==', '!=', '<', '<=', '>', '>=',
        # Arithmetic
        r'\+', '-', r'\*', '/', '//', r'\%', r'\*\*',
        # In-place
        r'\+=', '-=', r'\*=', '/=', r'\%=',
        # Bitwise
        r'\^', r'\|', r'\&', r'\~', '>>', '<<',
    ]
    # Python braces
    braces = [
        r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
    ]
def __init__(self, document, is_python=True):
QSyntaxHighlighter.__init__(self, document)
# Multi-line strings (expression, flag, style)
# FIXME: The triple-quotes in these two lines will mess up the
# syntax highlighting from this point onward
self.tri_single = (QRegExp("'''"), 1, STYLES['string2'])
self.tri_double = (QRegExp('"""'), 2, STYLES['string2'])
rules = []
# Keyword, operator, and brace rules
keywords = PythonHighlighter.python_keywords if is_python \
else PythonHighlighter.c_keywords
rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
for w in keywords]
rules += [(r'%s' % o, 0, STYLES['operator'])
for o in PythonHighlighter.operators]
rules += [(r'%s' % b, 0, STYLES['brace'])
for b in PythonHighlighter.braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, STYLES['self']),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
# From '#' until a newline
(r'#[^\n]*', 0, STYLES['comment']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
]
# Add "//" to comments for C
if not is_python:
rules.append((r'//[^\n]*', 0, STYLES['comment']),)
# Build a QRegExp for each pattern
self.rules = [(QRegExp(pat), index, fmt)
for (pat, index, fmt) in rules]
def highlightBlock(self, text):
"""Apply syntax highlighting to the given block of text.
"""
# Do other syntax formatting
        for expression, nth, fmt in self.rules:
            index = expression.indexIn(text, 0)
            while index >= 0:
                # We actually want the index of the nth match
                index = expression.pos(nth)
                length = len(expression.cap(nth))
                self.setFormat(index, length, fmt)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False
if __name__ == '__main__':
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([])
editor = QtWidgets.QPlainTextEdit()
highlight = PythonHighlighter(editor.document())
editor.show()
    # Load this module's own source into the editor for demo purposes
    with open('PythonSyntax.py', 'r') as infile:
        editor.setPlainText(infile.read())
    app.exec_()
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Utilities/PythonSyntax.py
| 0.507568 | 0.459622 |
PythonSyntax.py
|
pypi
|
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from .UI.FrameSelectUI import Ui_FrameSelect
class FrameSelect(QtWidgets.QDialog, Ui_FrameSelect):
"""
Class to describe the behaviour of the Frame Selector widget
"""
def __init__(self, parent=None, frames=1, isBSL=True):
super(FrameSelect, self).__init__(parent)
self.setupUi(self)
self.n_frames = frames
self.isBSL = isBSL
self.firstFrame = None
self.lastFrame = None
self.increment = None
self.setWindowTitle("Frame Select")
self.addState()
self.addSlots()
self.addText()
def addText(self):
"""
Initialize view
"""
instructions = ("The file you've selected has {} frames. "
"Please select a subset of frames to convert to CanSAS "
"format").format(self.n_frames)
self.lblDescription.setText(instructions)
self.lblDescription.setWordWrap(True)
self.updateDisplay()
if self.isBSL:
self.chkSeparateFiles.setVisible(False)
self.chkSeparateFiles.setEnabled(False)
def addState(self):
"""
Minor bookkeeping
"""
self.firstFrame = 0
self.lastFrame = self.n_frames-1
self.increment = 1
self.updateDisplay()
def addSlots(self):
"""
Describe behaviour of OK and Cancel buttons
"""
self.cmdOK.clicked.connect(self.accept)
self.cmdCancel.clicked.connect(self.reject)
self.txtFirstFrame.setValidator(QtGui.QIntValidator(0, self.n_frames-1))
self.txtLastFrame.setValidator(QtGui.QIntValidator(0, self.n_frames-1))
self.txtIncrement.setValidator(QtGui.QIntValidator())
self.txtFirstFrame.editingFinished.connect(self.onFirstChanged)
self.txtLastFrame.editingFinished.connect(self.onLastChanged)
self.txtIncrement.editingFinished.connect(self.onIncrementChanged)
def updateDisplay(self):
"""
manage model-view sync
"""
self.txtFirstFrame.setText(str(self.firstFrame))
self.txtLastFrame.setText(str(self.lastFrame))
self.txtIncrement.setText(str(self.increment))
def onFirstChanged(self):
"""
Manage view-model sync
"""
self.cmdOK.setEnabled(False)
try:
frame = int(self.txtFirstFrame.text())
except ValueError:
return
if frame > self.lastFrame:
return
if frame < 0:
return
self.firstFrame = frame
self.cmdOK.setEnabled(True)
def onLastChanged(self):
"""
Manage view-model sync
"""
self.cmdOK.setEnabled(False)
try:
frame = int(self.txtLastFrame.text())
except ValueError:
return
if frame < self.firstFrame:
return
if frame < 0:
return
self.lastFrame = frame
self.cmdOK.setEnabled(True)
def onIncrementChanged(self):
"""
Manage view-model sync
"""
self.cmdOK.setEnabled(False)
try:
inc = int(self.txtIncrement.text())
except ValueError:
return
if inc < 0:
return
self.increment = inc
self.cmdOK.setEnabled(True)
def getFrames(self):
"""
Accessor for state values
"""
return (self.firstFrame, self.lastFrame, self.increment)
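if __name__ == "__main__":
    # Manual test sketch (an assumed entry point, not shipped with the
    # converter); it only works where the package's compiled FrameSelectUI
    # is importable, since this module uses a relative UI import.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dlg = FrameSelect(frames=10, isBSL=False)
    if dlg.exec_() == QtWidgets.QDialog.Accepted:
        print("(first, last, increment) =", dlg.getFrames())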
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Utilities/FrameSelect.py
| 0.546254 | 0.179135 |
FrameSelect.py
|
pypi
|
import os
import sys
import json
import logging
from collections import defaultdict, OrderedDict
USER_FILE = 'categories.json'
class CategoryInstaller:
"""
Class for making sure all category stuff is installed
Note - class is entirely static!
"""
def __init__(self):
""" initialization """
@staticmethod
def _get_home_dir():
"""
returns the users sasview config dir
"""
return os.path.join(os.path.expanduser("~"), ".sasview")
@staticmethod
def _regenerate_model_dict(master_category_dict):
"""
regenerates self.by_model_dict which has each model name as the key
and the list of categories belonging to that model
along with the enabled mapping
        returns a tuple (by_model_dict, model_enabled_dict)
"""
by_model_dict = defaultdict(list)
model_enabled_dict = defaultdict(bool)
for category in master_category_dict:
for (model, enabled) in master_category_dict[category]:
by_model_dict[model].append(category)
model_enabled_dict[model] = enabled
return (by_model_dict, model_enabled_dict)
@staticmethod
def _regenerate_master_dict(by_model_dict, model_enabled_dict):
"""
regenerates master_category_dict from by_model_dict
and model_enabled_dict
returns the master category dictionary
"""
master_category_dict = defaultdict(list)
for model in by_model_dict:
for category in by_model_dict[model]:
master_category_dict[category].append(\
(model, model_enabled_dict[model]))
return OrderedDict(sorted(list(master_category_dict.items()), key=lambda t: t[0]))
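    # Example (hypothetical data): by_model_dict == {'sphere': ['Shapes']} and
    # model_enabled_dict == {'sphere': True} regenerate the master dict as
    # OrderedDict([('Shapes', [('sphere', True)])]).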
@staticmethod
def get_user_file():
"""
        returns the user data file, eg .sasview/categories.json
"""
return os.path.join(CategoryInstaller._get_home_dir(), USER_FILE)
@staticmethod
def get_default_file():
logging.warning("CategoryInstaller.get_default_file is deprecated.")
@staticmethod
def check_install(homedir = None, model_list=None):
"""
the main method of this class
makes sure categories.json exists and if not
compile it and install
        :param homedir: Override the default home directory
:param model_list: List of model names except customized models
"""
        _model_dict = {model.name: model for model in model_list}
_model_list = list(_model_dict.keys())
serialized_file = None
if homedir is None:
serialized_file = CategoryInstaller.get_user_file()
else:
serialized_file = os.path.join(homedir, USER_FILE)
if os.path.isfile(serialized_file):
with open(serialized_file, 'rb') as f:
master_category_dict = json.load(f)
else:
master_category_dict = defaultdict(list)
(by_model_dict, model_enabled_dict) = \
CategoryInstaller._regenerate_model_dict(master_category_dict)
add_list = _model_list
del_name = False
for cat in list(master_category_dict.keys()):
for ind in range(len(master_category_dict[cat])):
model_name, enabled = master_category_dict[cat][ind]
if model_name not in _model_list:
del_name = True
try:
by_model_dict.pop(model_name)
model_enabled_dict.pop(model_name)
                    except Exception:
logging.error("CategoryInstaller: %s", sys.exc_info()[1])
else:
add_list.remove(model_name)
if del_name or (len(add_list) > 0):
for model in add_list:
model_enabled_dict[model]= True
if _model_dict[model].category is None or len(str(_model_dict[model].category.capitalize())) == 0:
by_model_dict[model].append('Uncategorized')
else:
category = _model_dict[model].category
toks = category.split(':')
category = toks[-1]
toks = category.split('-')
capitalized_words = [t.capitalize() for t in toks]
category = ' '.join(capitalized_words)
by_model_dict[model].append(category)
master_category_dict = \
CategoryInstaller._regenerate_master_dict(by_model_dict,
model_enabled_dict)
        with open(serialized_file, "w", encoding="utf8") as out_file:
            json.dump(master_category_dict, out_file)
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Utilities/CategoryInstaller.py
| 0.417984 | 0.167627 |
CategoryInstaller.py
|
pypi
|
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
import sas.qtgui.path_prepare
import matplotlib as mpl
import numpy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from sas.qtgui.Plotting.PlotterData import Data2D
from sas.qtgui.Utilities.GuiUtils import formatNumber, DoubleValidator
from .rangeSlider import RangeSlider
DEFAULT_MAP = 'jet'
# Local UI
from sas.qtgui.UI import main_resources_rc
from sas.qtgui.Plotting.UI.ColorMapUI import Ui_ColorMapUI
class ColorMap(QtWidgets.QDialog, Ui_ColorMapUI):
apply_signal = QtCore.pyqtSignal(tuple, str)
def __init__(self, parent=None, cmap=None, vmin=0.0, vmax=100.0, data=None):
super(ColorMap, self).__init__()
self.setupUi(self)
assert(isinstance(data, Data2D))
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.data = data
self._cmap_orig = self._cmap = cmap if cmap is not None else DEFAULT_MAP
self.all_maps = [m for m in mpl.cm.datad]
self.maps = sorted(m for m in self.all_maps if not m.endswith("_r"))
self.rmaps = sorted(set(self.all_maps) - set(self.maps))
self.vmin = self.vmin_orig = int(vmin)
self.vmax = self.vmax_orig = int(vmax)
# Initialize detector labels
self.initDetectorData()
# Initialize the combo box
self.initMapCombobox()
self.initRangeSlider()
# Add the color map component
self.initColorMap()
# Initialize validators on amplitude textboxes
validator_min = DoubleValidator(self.txtMinAmplitude)
validator_min.setNotation(0)
self.txtMinAmplitude.setValidator(validator_min)
validator_max = DoubleValidator(self.txtMaxAmplitude)
validator_max.setNotation(0)
self.txtMaxAmplitude.setValidator(validator_max)
# Set the initial amplitudes
self.txtMinAmplitude.setText(formatNumber(self.vmin))
self.txtMaxAmplitude.setText(formatNumber(self.vmax))
# Enforce constant size on the widget
self.setFixedSize(self.minimumSizeHint())
# Handle combobox changes
self.cbColorMap.currentIndexChanged.connect(self.onMapIndexChange)
# Handle checkbox changes
self.chkReverse.stateChanged.connect(self.onColorMapReversed)
# Handle the Reset button click
self.buttonBox.button(QtWidgets.QDialogButtonBox.Reset).clicked.connect(self.onReset)
# Handle the Apply button click
self.buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.connect(self.onApply)
# Handle the amplitude setup
self.txtMinAmplitude.editingFinished.connect(self.onAmplitudeChange)
self.txtMaxAmplitude.editingFinished.connect(self.onAmplitudeChange)
def cmap(self):
"""
Getter for the color map
"""
return self._cmap
def norm(self):
"""
Getter for the color map norm
"""
return (self._norm.vmin, self._norm.vmax)
def onReset(self):
"""
Respond to the Reset button click
"""
# Go back to original settings
self._cmap = self._cmap_orig
self.vmin = self.vmin_orig
self.vmax = self.vmax_orig
self._norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
self.txtMinAmplitude.setText(formatNumber(self.vmin))
self.txtMaxAmplitude.setText(formatNumber(self.vmax))
self.initMapCombobox()
self.slider.setMinimum(self.vmin)
self.slider.setMaximum(self.vmax)
self.slider.setLowValue(self.vmin)
self.slider.setHighValue(self.vmax)
# Redraw the widget
self.redrawColorBar()
self.canvas.draw()
def onApply(self):
"""
Respond to the Apply button click.
Send a signal to the plotter with vmin/vmax/cmap for chart update
"""
self.apply_signal.emit(self.norm(), self.cmap())
def initDetectorData(self):
"""
Fill out the Detector labels
"""
xnpts = len(self.data.x_bins)
ynpts = len(self.data.y_bins)
self.lblWidth.setText(formatNumber(xnpts))
self.lblHeight.setText(formatNumber(ynpts))
xmax = max(self.data.xmin, self.data.xmax)
ymax = max(self.data.ymin, self.data.ymax)
qmax = numpy.sqrt(numpy.power(xmax, 2) + numpy.power(ymax, 2))
self.lblQmax.setText(formatNumber(qmax))
self.lblStopRadius.setText(formatNumber(self.data.xmin))
def initMapCombobox(self):
"""
Fill out the combo box with all available color maps
"""
if self._cmap in self.rmaps:
maps = self.rmaps
# Assure correct state of the checkbox
self.chkReverse.setChecked(True)
else:
maps = self.maps
# Assure correct state of the checkbox
self.chkReverse.setChecked(False)
self.cbColorMap.addItems(maps)
# Set the default/passed map
self.cbColorMap.setCurrentIndex(self.cbColorMap.findText(self._cmap))
def initRangeSlider(self):
"""
Create and display the double slider for data range mapping.
"""
self.slider = RangeSlider()
self.slider.setMinimum(self.vmin)
self.slider.setMaximum(self.vmax)
self.slider.setLowValue(self.vmin)
self.slider.setHighValue(self.vmax)
self.slider.setOrientation(QtCore.Qt.Horizontal)
self.slider_label = QtWidgets.QLabel()
self.slider_label.setText("Drag the sliders to adjust color range.")
def set_vmin(value):
self.vmin = int(value)
self.txtMinAmplitude.setText(str(value))
self.updateMap()
def set_vmax(value):
self.vmax = int(value)
self.txtMaxAmplitude.setText(str(value))
self.updateMap()
self.slider.lowValueChanged.connect(set_vmin)
self.slider.highValueChanged.connect(set_vmax)
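        # Note: the set_vmin/set_vmax closures above funnel slider motion into
        # self.vmin/self.vmax and then updateMap(), so the colorbar preview
        # and the amplitude text boxes track the sliders while dragging.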
def updateMap(self):
self._norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
self.redrawColorBar()
self.canvas.draw()
def initColorMap(self):
"""
Prepare and display the color map
"""
self.fig = mpl.figure.Figure(figsize=(4, 1))
self.ax1 = self.fig.add_axes([0.05, 0.65, 0.9, 0.15])
self._norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
self.redrawColorBar()
self.canvas = FigureCanvas(self.fig)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.slider_label)
layout.addWidget(self.slider)
layout.addWidget(self.canvas)
self.widget.setLayout(layout)
def onMapIndexChange(self, index):
"""
Respond to the color map change event
"""
new_map = str(self.cbColorMap.itemText(index))
self._cmap = new_map
self.redrawColorBar()
self.canvas.draw()
def redrawColorBar(self):
"""
Call ColorbarBase with current values, effectively redrawing the widget
"""
        self.cb = mpl.colorbar.ColorbarBase(self.ax1, cmap=mpl.cm.get_cmap(self._cmap),
norm=self._norm,
orientation='horizontal')
self.cb.set_label('Color map range')
def onColorMapReversed(self, isChecked):
"""
Respond to ticking/unticking the color map reverse checkbox
"""
current_map = str(self.cbColorMap.currentText())
if isChecked:
# Add "_r" to map name for the reversed version
new_map = current_map + "_r"
maps = self.rmaps
# Assure the reversed map exists.
if new_map not in maps:
new_map = maps[0]
else:
new_map = current_map[:-2] # "_r" = last two chars
maps = self.maps
# Base map for the reversed map should ALWAYS exist,
# but let's be paranoid here
if new_map not in maps:
new_map = maps[0]
self._cmap = new_map
# Clear the content of the combobox.
# Needs signal blocking, or else onMapIndexChange() spoils it all
self.cbColorMap.blockSignals(True)
self.cbColorMap.clear()
# Add the new set of maps
self.cbColorMap.addItems(maps)
# Remove the signal block before the proper index set
self.cbColorMap.blockSignals(False)
self.cbColorMap.setCurrentIndex(self.cbColorMap.findText(new_map))
def onAmplitudeChange(self):
"""
Respond to editing the amplitude fields
"""
min_amp = self.vmin
max_amp = self.vmax
try:
min_amp = float(self.txtMinAmplitude.text())
except ValueError:
pass
try:
max_amp = float(self.txtMaxAmplitude.text())
except ValueError:
pass
self._norm = mpl.colors.Normalize(vmin=min_amp, vmax=max_amp)
self.redrawColorBar()
self.canvas.draw()
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/ColorMap.py
| 0.712932 | 0.225811 |
ColorMap.py
|
pypi
|
import numpy as np
from typing import Union
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QLineEdit, QTextEdit
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
import sas.qtgui.Utilities.ObjectLibrary as ol
class QRangeSlider(BaseInteractor):
""" Draw a pair of draggable vertical lines. Each line can be linked to a GUI input.
The GUI input should update the lines and vice-versa.
"""
def __init__(self, base, axes, color='black', zorder=5, data=None):
# type: (Plotter, Plotter.ax, str, int, Data1D) -> None
""" Initialize the slideable lines and associated markers """
# Assert the data object is a plottable
assert isinstance(data, Data1D)
# Plotter object
BaseInteractor.__init__(self, base, axes, color=color)
self.base = base
self.markers = []
self.axes = axes
self.data = data
# Track the visibility for toggling the slider on/off
self.is_visible = False
self.connect = self.base.connect
# Min and max x values
self.x_min = np.fabs(min(self.data.x))
self.y_marker_min = self.data.y[np.where(self.data.x == self.x_min)[0][0]]
self.x_max = np.fabs(max(self.data.x))
self.y_marker_max = self.data.y[np.where(self.data.x == self.x_max)[0][-1]]
# Should the inputs update while the bar is actively being dragged?
self.updateOnMove = data.slider_update_on_move
# Draw the lines
self.line_min = LineInteractor(self, axes, zorder=zorder, x=self.x_min, y=self.y_marker_min,
input=data.slider_low_q_input, setter=data.slider_low_q_setter,
getter=data.slider_low_q_getter, perspective=data.slider_perspective_name,
tab=data.slider_tab_name)
self.line_max = LineInteractor(self, axes, zorder=zorder, x=self.x_max, y=self.y_marker_max,
input=data.slider_high_q_input, setter=data.slider_high_q_setter,
getter=data.slider_high_q_getter, perspective=data.slider_perspective_name,
tab=data.slider_tab_name)
self.has_move = True
self.update()
def clear(self):
# type: () -> None
""" Clear this slicer and its markers """
self.clear_markers()
def show(self):
# type: () -> None
""" Show this slicer and its markers """
self.line_max.draw()
self.line_min.draw()
self.update()
def remove(self):
# type: () -> None
""" Remove this slicer and its markers """
self.line_max.remove()
self.line_min.remove()
self.draw()
self.is_visible = False
def update(self, x=None, y=None):
# type: (float, float) -> None
"""Draw the new lines on the graph."""
self.line_min.update(x, y, draw=self.updateOnMove)
self.line_max.update(x, y, draw=self.updateOnMove)
self.base.update()
self.is_visible = True
def save(self, ev):
# type: (QEvent) -> None
""" Remember the position of the lines so that we can restore on Esc. """
self.line_min.save(ev)
self.line_max.save(ev)
def restore(self, ev):
# type: (QEvent) -> None
""" Restore the lines. """
self.line_max.restore(ev)
self.line_min.restore(ev)
def toggle(self):
# type: () -> None
""" Toggle the slider visibility. """
if self.is_visible:
self.remove()
else:
self.show()
def move(self, x, y, ev):
# type: (float, float, QEvent) -> None
""" Process move to a new position, making sure that the move is allowed. """
pass
def clear_markers(self):
# type: () -> None
""" Clear each of the lines individually """
self.line_min.clear()
self.line_max.clear()
def draw(self):
# type: () -> None
""" Update the plot """
self.base.draw()
class LineInteractor(BaseInteractor):
"""
Draw a single vertical line that can be dragged on a plot
"""
def __init__(self, base, axes, color='black', zorder=5, x=0.5, y=0.5,
input=None, setter=None, getter=None, perspective=None, tab=None):
# type: (Plotter, Plotter.ax, str, int, float, float, [str], [str], [str], str, str) -> None
""" Initialize the line interactor object"""
BaseInteractor.__init__(self, base, axes, color=color)
# Plotter object
self.base = base
# Inputs and methods linking this slider to a GUI element so, as one changes, the other also updates
self._input = None
self._setter = None
self._getter = None
# The marker(s) for this line - typically only one
self.markers = []
# The Plotter.ax object
self.axes = axes
# X and Y values used for the line and markers
self.x = x
self.save_x = self.x
self.y_marker = y
self.save_y = self.y_marker
self.draw(zorder)
# Is the slider able to move
self.has_move = True
try:
data_explorer = ol.getObject('DataExplorer')
self.perspective = data_explorer.parent.loadedPerspectives.get(perspective, None)
except AttributeError:
# QRangeSlider is disconnected from GuiManager for testing
self.perspective = None
if self.perspective is None:
return
if tab and hasattr(self.perspective, 'getTabByName'):
            # If the perspective is tabbed, set the perspective to the tab this slider is associated with
self.perspective = self.perspective.getTabByName(tab)
if self.perspective:
# Connect the inputs and methods
self.input = self._get_input_or_callback(input)
self.setter = self._get_input_or_callback(setter)
self.getter = self._get_input_or_callback(getter)
self.connect_markers([self.line, self.inner_marker])
self.update(draw=True)
@property
def input(self):
# type: () -> Union[QLineEdit, QTextEdit, None]
""" Get the text input that should be linked to the position of this slider """
return self._input
@input.setter
def input(self, input):
# type: (Union[QLineEdit, QTextEdit, None]) -> None
""" Set the text input that should be linked to the position of this slider """
self._input = input
if self._input:
self._input.editingFinished.connect(self.inputChanged)
@property
def setter(self):
# type: () -> Union[callable, None]
""" Get the x-value setter method associated with this slider """
return self._setter
@setter.setter
def setter(self, setter):
# type: (Union[callable, None]) -> None
""" Set the x-value setter method associated with this slider """
self._setter = setter if callable(setter) else None
@property
def getter(self):
# type: () -> Union[callable, None]
""" Get the x-value getter method associated with this slider """
return self._getter
@getter.setter
def getter(self, getter):
# type: (Union[callable, None]) -> None
""" Set the x-value getter associated with this slider """
self._getter = getter if callable(getter) else None
def clear(self):
# type: () -> None
""" Disconnect any inputs and callbacks and the clear the line and marker """
self.clear_markers()
self.remove()
def remove(self):
# type: () -> None
""" Clear this slicer and its markers """
if self.inner_marker:
self.inner_marker.remove()
if self.line:
self.line.remove()
def draw(self, zorder=5):
# type: (int) -> None
""" Draw the line and marker on the linked plot using the latest x and y values """
# Inner circle marker
self.inner_marker = self.axes.plot([self.x], [self.y_marker], linestyle='', marker='o', markersize=4,
color=self.color, alpha=0.6, pickradius=5, label=None, zorder=zorder,
visible=True)[0]
self.line = self.axes.axvline(self.x, linestyle='-', color=self.color, marker='', pickradius=5,
label=None, zorder=zorder, visible=True)
def _get_input_or_callback(self, connection_list=None):
# type: ([str]) -> Union[QLineEdit, QTextEdit, None]
""" Returns an input or callback method based on a list of inputs/commands """
connection = None
if isinstance(connection_list, list):
connection = self.perspective
for item in connection_list:
try:
connection = getattr(connection, item)
except Exception:
return None
return connection
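    # Example (hypothetical chain): connection_list == ['txtLowQ'] resolves to
    # getattr(self.perspective, 'txtLowQ'); ['options', 'setQLow'] walks two
    # attributes deep. Any broken link in the chain returns None, so the slider
    # is simply left unlinked instead of raising.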
def _set_q(self, value):
# type: (float) -> None
""" Call the q setter callback method if it exists """
self.x = value
if self.setter and callable(self.setter):
self.setter(value)
elif hasattr(self.input, 'setText'):
self.input.setText(f"{value:.3}")
def _get_q(self):
# type: () -> None
""" Get the q value, inferring the method to get the value """
if self.getter:
# Separate callback method to get Q value
self.x = float(self.getter())
elif hasattr(self.input, 'text'):
# Line edit box
self.x = float(self.input.text())
elif hasattr(self.input, 'getText'):
# Text box
self.x = float(self.input.getText())
def inputChanged(self):
# type: () -> None
""" Track the input linked to the x value for this slider and update as needed """
self._get_q()
self.y_marker = self.base.data.y[(np.abs(self.base.data.x - self.x)).argmin()]
self.update(draw=True)
def update(self, x=None, y=None, draw=False):
# type: (float, float, bool) -> None
""" Update the line position on the graph. """
# Reset x, y -coordinates if given as parameters
if x is not None:
self.x = np.sign(self.x) * np.fabs(x)
if y is not None:
self.y_marker = y
# Draw lines and markers
self.inner_marker.set_xdata([self.x])
self.inner_marker.set_ydata([self.y_marker])
self.line.set_xdata([self.x])
if draw:
self.base.draw()
def save(self, ev):
# type: (QEvent) -> None
""" Remember the position for this line so that we can restore on Esc. """
self.save_x = self.x
self.save_y = self.y_marker
def restore(self, ev):
# type: (QEvent) -> None
""" Restore the position for this line """
self.x = self.save_x
self.y_marker = self.save_y
def move(self, x, y, ev):
# type: (float, float, QEvent) -> None
""" Process move to a new position, making sure that the move is allowed. """
self.has_move = True
self.x = x
if self.base.updateOnMove:
self._set_q(x)
self.y_marker = self.base.data.y[(np.abs(self.base.data.x - self.x)).argmin()]
self.update(draw=self.base.updateOnMove)
def onRelease(self, ev):
# type: (QEvent) -> bool
""" Update the line position when the mouse button is released """
# Set the Q value if a callable setter exists otherwise update the attached input
self._set_q(self.x)
self.update(draw=True)
self.moveend(ev)
return True
def clear_markers(self):
# type: () -> None
""" Disconnect the input and clear the callbacks """
if self.input:
self.input.editingFinished.disconnect(self.inputChanged)
self.setter = None
self.getter = None
self.input = None
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/QRangeSlider.py
| 0.850018 | 0.398026 |
QRangeSlider.py
|
pypi
|
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
import sas.sasview
from sas.qtgui.UI import main_resources_rc
from sas.qtgui.Plotting.UI.ScalePropertiesUI import Ui_scalePropertiesUI
x_values = ["x", "x^(2)", "x^(4)", "ln(x)", "log10(x)", "log10(x^(4))"]
y_values = ["y", "1/y", "ln(y)", "y^(2)", "y*x^(2)", "y*x^(4)", "1/sqrt(y)",
"log10(y)", "ln(y*x)", "ln(y*x^(2))", "ln(y*x^(4))", "log10(y*x^(4))"]
view_values = ["--", "Linear y vs x", "log(y) vs log(x)", "Guinier lny vs x^(2)",
"XS Guinier ln(y*x) vs x^(2)", "Porod y*x^(4) vs x^(4)", "Kratky y*x^(2) vs x"]
view_to_xy = {
view_values[0]: [None, None], # custom
view_values[1]: [0, 0], # linear
view_values[2]: [4, 7], # log
view_values[3]: [1, 2], # Guinier
view_values[4]: [1, 8], # XS Guinier
view_values[5]: [2, 5], # Porod
view_values[6]: [0, 4], # Kratky
}
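# Example: "Guinier lny vs x^(2)" maps to view_to_xy[view_values[3]] == [1, 2],
# i.e. x_values[1] == "x^(2)" and y_values[2] == "ln(y)".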
class ScaleProperties(QtWidgets.QDialog, Ui_scalePropertiesUI):
def __init__(self, parent=None, init_scale_x='x', init_scale_y='y'):
super(ScaleProperties, self).__init__(parent)
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
# Set up comboboxes
self.cbX.addItems(x_values)
self.cbY.addItems(y_values)
self.cbView.addItems(view_values)
# Resize the dialog only AFTER the boxes are populated
self.setFixedSize(self.minimumSizeHint())
# Set up the initial values for x and y.
# This avoids keeping a QModel instance here.
if init_scale_x in x_values and init_scale_y in y_values:
self.cbX.setCurrentIndex(x_values.index(init_scale_x))
self.cbY.setCurrentIndex(y_values.index(init_scale_y))
# Connect combobox index change to a custom method
self.cbView.currentIndexChanged.connect(self.viewIndexChanged)
self.cbX.currentIndexChanged.connect(self.xyIndexChanged)
self.cbY.currentIndexChanged.connect(self.xyIndexChanged)
def getValues(self):
"""
Return current values from comboboxes
"""
return str(self.cbX.currentText()), str(self.cbY.currentText())
def viewIndexChanged(self, index):
"""
Update X and Y labels based on the "View" index
"""
if index > 0:
# Disable signals so xyIndexChanged() doesn't get called
self.cbX.blockSignals(True)
self.cbY.blockSignals(True)
# Update the sub-controls
self.cbX.setCurrentIndex(view_to_xy[view_values[index]][0])
self.cbY.setCurrentIndex(view_to_xy[view_values[index]][1])
# Re-enable the signals
self.cbX.blockSignals(False)
self.cbY.blockSignals(False)
def xyIndexChanged(self, index):
"""
Update View label based on the "X" and "Y" index
"""
self.cbView.setCurrentIndex(0)
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/ScaleProperties.py
| 0.517327 | 0.311165 |
ScaleProperties.py
|
pypi
|
import re
import numpy
from typing import Optional
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from sas.qtgui.Utilities.GuiUtils import formatNumber, DoubleValidator
from bumps.fitproblem import FitProblem
from sasmodels.core import load_model
from sasmodels.bumps_model import Model, Experiment
from sas.qtgui.Plotting import DataTransform
from sas.qtgui.Plotting.QRangeSlider import QRangeSlider
import sas.qtgui.Utilities.GuiUtils as GuiUtils
# Local UI
from sas.qtgui.Plotting.UI.LinearFitUI import Ui_LinearFitUI
# TODO: embed into plot window and add data name
# TODO: show/hide linear fit in context menu (retain in plot)
class LinearFit(QtWidgets.QWidget, Ui_LinearFitUI):
updatePlot = QtCore.pyqtSignal(tuple)
def __init__(self, parent=None,
data=None,
max_range=(0.0, 0.0),
fit_range=(0.0, 0.0),
xlabel="",
ylabel=""):
super(LinearFit, self).__init__(parent)
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
assert(isinstance(max_range, tuple))
assert(isinstance(fit_range, tuple))
self.data = data
self.parent = parent
self.max_range = max_range
# Set fit minimum to 0.0 if below zero
if fit_range[0] < 0.0:
fit_range = (0.0, fit_range[1])
self.fit_range = fit_range
self.xLabel = xlabel
self.yLabel = ylabel
self.rg_on = False
self.rg_yx = False
self.bg_on = False
# Scale dependent content
self.guinier_box.setVisible(False)
if (self.yLabel == "ln(y)" or self.yLabel == "ln(y*x)") and \
(self.xLabel == "x^(2)"):
if self.yLabel == "ln(y*x)":
self.label_12.setText('<html><head/><body><p>Rod diameter [Å]</p></body></html>')
self.rg_yx = True
self.rg_on = True
self.guinier_box.setVisible(True)
if (self.xLabel == "x^(4)") and (self.yLabel == "y*x^(4)"):
self.bg_on = True
self.label_3.setText('Background')
self.x_is_log = self.xLabel == "log10(x)"
self.y_is_log = self.yLabel == "log10(y)"
self.txtFitRangeMin.setValidator(DoubleValidator())
self.txtFitRangeMax.setValidator(DoubleValidator())
# Default values in the line edits
self.txtA.setText("1")
self.txtB.setText("1")
self.txtAerr.setText("0")
self.txtBerr.setText("0")
self.txtChi2.setText("0")
# Initial ranges
self.txtRangeMin.setText(str(max_range[0]))
self.txtRangeMax.setText(str(max_range[1]))
# Assure nice display of ranges
fr_min = GuiUtils.formatNumber(fit_range[0])
fr_max = GuiUtils.formatNumber(fit_range[1])
self.txtFitRangeMin.setText(str(fr_min))
self.txtFitRangeMax.setText(str(fr_max))
# cast xLabel into html
label = re.sub(r'\^\((.)\)(.*)', r'<span style=" vertical-align:super;">\1</span>\2',
str(self.xLabel).rstrip())
self.lblRange.setText('Fit range of ' + label)
# Display the fittings values
self.cstA = 1.0
self.cstB = 1.0
kernel = load_model('line')
self.model = Model(kernel, scale=1.0, background=0, intercept=self.cstA, slope=self.cstB)
self.transform = DataTransform
self.q_sliders = None
self.drawSliders()
self.setFixedSize(self.minimumSizeHint())
# connect Fit button
self.cmdFit.clicked.connect(self.fit)
def setRangeLabel(self, label=""):
"""
Overwrite default fit range label to correspond to actual unit
"""
assert(isinstance(label, str))
self.lblRange.setText(label)
def range(self):
return (float(self.txtFitRangeMin.text()) if float(self.txtFitRangeMin.text()) > 0 else 0.0,
float(self.txtFitRangeMax.text()))
def fit(self, event):
"""
        Perform the fit. Receives an event when the Fit button is clicked.
        Computes chisqr and the A and B parameters of the best linear fit
        y = Ax + B, then pushes a plottable to the caller.
"""
tempx = []
tempy = []
tempdy = []
# Checks to assure data correctness
if len(self.data.view.x) < 2:
return
if not self.checkFitValues(self.txtFitRangeMin):
return
self.xminFit, self.xmaxFit = self.range()
xmin = self.xminFit
xmax = self.xmaxFit
xminView = xmin
xmaxView = xmax
# Set the qmin and qmax in the panel that matches the
# transformed min and max
value_xmin = self.floatInvTransform(xmin)
value_xmax = self.floatInvTransform(xmax)
self.txtRangeMin.setText(formatNumber(value_xmin))
self.txtRangeMax.setText(formatNumber(value_xmax))
tempx, tempy, tempdy = self.origData()
# Find the fitting parameters
tempdy = numpy.asarray(tempdy)
tempdy[tempdy == 0] = 1
if self.x_is_log:
xmin = numpy.log10(xmin)
xmax = numpy.log10(xmax)
M = Experiment(data=self.data, model=self.model)
problem = FitProblem(M)
        # Placeholder results: the actual optimizer call on `problem` is elided
        # here, so the None guards below simply fall back to zeros.
        chisqr, out, cov = (0, None, None)
# Use chi2/dof
if len(tempx) > 0:
chisqr = chisqr / len(tempx)
# Check that cov and out are iterable before displaying them
errA = numpy.sqrt(cov[0][0]) if cov is not None else 0
errB = numpy.sqrt(cov[1][1]) if cov is not None else 0
cstA = out[0] if out is not None else 0.0
cstB = out[1] if out is not None else 0.0
# Reset model with the right values of A and B
self.model.setParam('A', float(cstA))
self.model.setParam('B', float(cstB))
tempx = []
tempy = []
y_model = 0.0
# load tempy with the minimum transformation
y_model = self.model.run(xmin)
tempx.append(xminView)
tempy.append(numpy.power(10.0, y_model) if self.y_is_log else y_model)
# load tempy with the maximum transformation
y_model = self.model.run(xmax)
tempx.append(xmaxView)
tempy.append(numpy.power(10.0, y_model) if self.y_is_log else y_model)
# Set the fit parameter display when FitDialog is opened again
self.Avalue = cstA
self.Bvalue = cstB
self.ErrAvalue = errA
self.ErrBvalue = errB
self.Chivalue = chisqr
# Update the widget
self.txtA.setText(formatNumber(self.Avalue))
self.txtAerr.setText(formatNumber(self.ErrAvalue))
self.txtB.setText(formatNumber(self.Bvalue))
self.txtBerr.setText(formatNumber(self.ErrBvalue))
self.txtChi2.setText(formatNumber(self.Chivalue))
# Possibly Guinier analysis
i0 = numpy.exp(cstB)
self.txtGuinier_1.setText(formatNumber(i0))
err = numpy.abs(numpy.exp(cstB) * errB)
self.txtGuinier1_Err.setText(formatNumber(err))
if self.rg_yx:
rg = numpy.sqrt(-2 * float(cstA))
diam = 4 * numpy.sqrt(-float(cstA))
value = formatNumber(diam)
if rg is not None and rg != 0:
err = formatNumber(8 * float(errA) / diam)
else:
err = ''
else:
rg = numpy.sqrt(-3 * float(cstA))
value = formatNumber(rg)
if rg is not None and rg != 0:
err = formatNumber(3 * float(errA) / (2 * rg))
else:
err = ''
self.txtGuinier_2.setText(value)
self.txtGuinier2_Err.setText(err)
value = formatNumber(rg * self.floatInvTransform(self.xminFit))
self.txtGuinier_4.setText(value)
value = formatNumber(rg * self.floatInvTransform(self.xmaxFit))
self.txtGuinier_3.setText(value)
tempx = numpy.array(tempx)
tempy = numpy.array(tempy)
self.clearSliders()
self.updatePlot.emit((tempx, tempy))
self.drawSliders()
def origData(self):
# Store the transformed values of view x, y and dy before the fit
xmin_check = numpy.log10(self.xminFit)
# Local shortcuts
x = self.data.view.x
y = self.data.view.y
dy = self.data.view.dy
if self.y_is_log:
if self.x_is_log:
tempy = [numpy.log10(y[i])
for i in range(len(x)) if x[i] >= xmin_check]
tempdy = [DataTransform.errToLogX(y[i], 0, dy[i], 0)
for i in range(len(x)) if x[i] >= xmin_check]
else:
tempy = list(map(numpy.log10, y))
tempdy = list(map(lambda t1,t2:DataTransform.errToLogX(t1,0,t2,0),y,dy))
else:
tempy = y
tempdy = dy
if self.x_is_log:
tempx = [numpy.log10(x) for x in self.data.view.x if x > xmin_check]
else:
tempx = x
return numpy.array(tempx), numpy.array(tempy), numpy.array(tempdy)
def checkFitValues(self, item):
"""
Check the validity of input values
"""
flag = True
value = item.text()
p_white = item.palette()
p_white.setColor(item.backgroundRole(), QtCore.Qt.white)
p_pink = item.palette()
p_pink.setColor(item.backgroundRole(), QtGui.QColor(255, 128, 128))
item.setAutoFillBackground(True)
# Check for possible values entered
if self.x_is_log:
if float(value) > 0:
item.setPalette(p_white)
else:
flag = False
item.setPalette(p_pink)
return flag
def floatInvTransform(self, x):
"""
        Transform a float. It is used to determine the x.View min and x.View
max for values not in x. Also used to properly calculate RgQmin,
RgQmax and to update qmin and qmax in the linear range boxes on the
panel.
"""
# TODO: refactor this. This is just a hack to make the
        # functionality work without rewriting the whole code
# with good design (which really should be done...).
if self.xLabel == "x":
return x
elif self.xLabel == "x^(2)":
return numpy.sqrt(x)
elif self.xLabel == "x^(4)":
return numpy.sqrt(numpy.sqrt(x))
elif self.xLabel == "log10(x)":
return numpy.power(10.0, x)
elif self.xLabel == "ln(x)":
return numpy.exp(x)
elif self.xLabel == "log10(x^(4))":
return numpy.sqrt(numpy.sqrt(numpy.power(10.0, x)))
return x
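    # Example: with self.xLabel == "x^(2)", floatInvTransform(0.04) returns
    # sqrt(0.04) == 0.2, mapping a fitted x^2 range edge back to a plain Q value.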
def drawSliders(self):
"""Show new Q-range sliders"""
self.data.show_q_range_sliders = True
self.q_sliders = QRangeSlider(self.parent, self.parent.ax, data=self.data)
self.q_sliders.line_min.input = self.txtFitRangeMin
self.q_sliders.line_max.input = self.txtFitRangeMax
# Ensure values are updated on redraw of plots
self.q_sliders.line_min.inputChanged()
self.q_sliders.line_max.inputChanged()
def clearSliders(self):
"""Clear existing sliders"""
if self.q_sliders:
self.q_sliders.clear()
self.data.show_q_range_sliders = False
self.q_sliders = None
def closeEvent(self, ev):
self.clearSliders()
self.parent.update()
def accept(self, ev):
self.close()
def reject(self, ev):
self.close()
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/LineraFitNew.py
| 0.583797 | 0.333096 |
LineraFitNew.py
|
pypi
|
import copy
import numpy
import math
from sas.sascalc.data_util.uncertainty import Uncertainty
from sas.qtgui.Plotting.Plottables import PlottableData1D
from sas.qtgui.Plotting.Plottables import PlottableData2D
from sas.sascalc.dataloader.data_info import Data1D as LoadData1D
from sas.sascalc.dataloader.data_info import Data2D as LoadData2D
class Data1D(PlottableData1D, LoadData1D):
"""
"""
ROLE_DATA=0
ROLE_DEFAULT=1
ROLE_DELETABLE=2
ROLE_RESIDUAL=3
def __init__(self, x=None, y=None, dx=None, dy=None):
"""
"""
if x is None:
x = []
if y is None:
y = []
PlottableData1D.__init__(self, x, y, dx, dy)
LoadData1D.__init__(self, x, y, dx, dy)
self.id = None
self.list_group_id = []
self.group_id = None
self.is_data = True
self.path = None
self.xtransform = None
self.ytransform = None
self.title = ""
self.scale = None
# plot_role:
# 0: data - no reload on param change
# 1: normal lifecycle (fit)
# 2: deletable on model change (Q(I), S(I)...)
# 3: separate chart on Show Plot (residuals)
self.plot_role = Data1D.ROLE_DEFAULT
# Q-range slider definitions
self.show_q_range_sliders = False # Should sliders be shown?
self.slider_update_on_move = True # Should the gui update during the move?
self.slider_perspective_name = "" # Name of the perspective that this slider is associated with
self.slider_tab_name = "" # Name of the tab where the data set is
# The following q-range slider variables are optional but help tie
# the slider to a GUI element for 2-way updates
self.slider_low_q_input = [] # List of attributes that lead to a Qt input to tie a low Q input to the slider
self.slider_high_q_input = [] # List of attributes that lead to a Qt input to tie a high Q input to the slider
# Setters and getters are only needed for inputs that aren't Q values
# e.g. Invariant perspective nPts
self.slider_low_q_setter = [] # List of attributes that lead to a setter to tie a low Q method to the slider
self.slider_low_q_getter = [] # List of attributes that lead to a getter to tie a low Q method to the slider
self.slider_high_q_setter = [] # List of attributes that lead to a setter to tie a high Q method to the slider
self.slider_high_q_getter = [] # List of attributes that lead to a getter to tie a high Q method to the slider
def copy_from_datainfo(self, data1d):
"""
        copy values of Data1D of type DataLoader.Data_info
"""
self.x = copy.deepcopy(data1d.x)
self.y = copy.deepcopy(data1d.y)
self.dy = copy.deepcopy(data1d.dy)
if hasattr(data1d, "dx"):
self.dx = copy.deepcopy(data1d.dx)
if hasattr(data1d, "dxl"):
self.dxl = copy.deepcopy(data1d.dxl)
if hasattr(data1d, "dxw"):
self.dxw = copy.deepcopy(data1d.dxw)
self.xaxis(data1d._xaxis, data1d._xunit)
self.yaxis(data1d._yaxis, data1d._yunit)
self.title = data1d.title
self.isSesans = data1d.isSesans
def __str__(self):
"""
        Return a printable summary of the data
"""
_str = "%s\n" % LoadData1D.__str__(self)
return _str
def _perform_operation(self, other, operation):
"""
"""
# First, check the data compatibility
dy, dy_other = self._validity_check(other)
result = Data1D(x=[], y=[], dx=None, dy=None)
result.clone_without_data(length=len(self.x), clone=self)
result.copy_from_datainfo(data1d=self)
if self.dxw is None:
result.dxw = None
else:
result.dxw = numpy.zeros(len(self.x))
if self.dxl is None:
result.dxl = None
else:
result.dxl = numpy.zeros(len(self.x))
for i in range(len(self.x)):
result.x[i] = self.x[i]
if self.dx is not None and len(self.x) == len(self.dx):
result.dx[i] = self.dx[i]
if self.dxw is not None and len(self.x) == len(self.dxw):
result.dxw[i] = self.dxw[i]
if self.dxl is not None and len(self.x) == len(self.dxl):
result.dxl[i] = self.dxl[i]
a = Uncertainty(self.y[i], dy[i]**2)
if isinstance(other, Data1D):
b = Uncertainty(other.y[i], dy_other[i]**2)
if other.dx is not None:
result.dx[i] *= self.dx[i]
result.dx[i] += (other.dx[i]**2)
result.dx[i] /= 2
result.dx[i] = math.sqrt(result.dx[i])
if result.dxl is not None and other.dxl is not None:
result.dxl[i] *= self.dxl[i]
result.dxl[i] += (other.dxl[i]**2)
result.dxl[i] /= 2
result.dxl[i] = math.sqrt(result.dxl[i])
else:
b = other
output = operation(a, b)
result.y[i] = output.x
result.dy[i] = math.sqrt(math.fabs(output.variance))
return result
def _perform_union(self, other):
"""
"""
# First, check the data compatibility
self._validity_check_union(other)
result = Data1D(x=[], y=[], dx=None, dy=None)
tot_length = len(self.x) + len(other.x)
result = self.clone_without_data(length=tot_length, clone=result)
if self.dy is None or other.dy is None:
result.dy = None
else:
result.dy = numpy.zeros(tot_length)
if self.dx is None or other.dx is None:
result.dx = None
else:
result.dx = numpy.zeros(tot_length)
if self.dxw is None or other.dxw is None:
result.dxw = None
else:
result.dxw = numpy.zeros(tot_length)
if self.dxl is None or other.dxl is None:
result.dxl = None
else:
result.dxl = numpy.zeros(tot_length)
result.x = numpy.append(self.x, other.x)
#argsorting
ind = numpy.argsort(result.x)
result.x = result.x[ind]
result.y = numpy.append(self.y, other.y)
result.y = result.y[ind]
if result.dy is not None:
result.dy = numpy.append(self.dy, other.dy)
result.dy = result.dy[ind]
if result.dx is not None:
result.dx = numpy.append(self.dx, other.dx)
result.dx = result.dx[ind]
if result.dxw is not None:
result.dxw = numpy.append(self.dxw, other.dxw)
result.dxw = result.dxw[ind]
if result.dxl is not None:
result.dxl = numpy.append(self.dxl, other.dxl)
result.dxl = result.dxl[ind]
return result
class Data2D(PlottableData2D, LoadData2D):
"""
"""
def __init__(self, image=None, err_image=None,
qx_data=None, qy_data=None, q_data=None,
mask=None, dqx_data=None, dqy_data=None,
xmin=None, xmax=None, ymin=None, ymax=None,
zmin=None, zmax=None):
"""
"""
PlottableData2D.__init__(self, image=image, err_image=err_image,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
zmin=zmin, zmax=zmax, qx_data=qx_data,
qy_data=qy_data)
LoadData2D.__init__(self, data=image, err_data=err_image,
qx_data=qx_data, qy_data=qy_data,
dqx_data=dqx_data, dqy_data=dqy_data,
q_data=q_data, mask=mask)
self.id = None
self.list_group_id = []
self.group_id = None
self.is_data = True
self.path = None
self.xtransform = None
self.ytransform = None
self.title = ""
self.scale = None
# Always default
self.plot_role = Data1D.ROLE_DEFAULT
def copy_from_datainfo(self, data2d):
"""
        Copy values from a Data2D of type DataLoader.data_info
"""
self.data = copy.deepcopy(data2d.data)
self.qx_data = copy.deepcopy(data2d.qx_data)
self.qy_data = copy.deepcopy(data2d.qy_data)
self.q_data = copy.deepcopy(data2d.q_data)
self.mask = copy.deepcopy(data2d.mask)
self.err_data = copy.deepcopy(data2d.err_data)
self.x_bins = copy.deepcopy(data2d.x_bins)
self.y_bins = copy.deepcopy(data2d.y_bins)
if data2d.dqx_data is not None:
self.dqx_data = copy.deepcopy(data2d.dqx_data)
if data2d.dqy_data is not None:
self.dqy_data = copy.deepcopy(data2d.dqy_data)
self.xmin = data2d.xmin
self.xmax = data2d.xmax
self.ymin = data2d.ymin
self.ymax = data2d.ymax
if hasattr(data2d, "zmin"):
self.zmin = data2d.zmin
if hasattr(data2d, "zmax"):
self.zmax = data2d.zmax
self.xaxis(data2d._xaxis, data2d._xunit)
self.yaxis(data2d._yaxis, data2d._yunit)
self.title = data2d.title
def __str__(self):
"""
        Return a printable summary of the data
"""
_str = "%s\n" % LoadData2D.__str__(self)
return _str
def _perform_operation(self, other, operation):
"""
Perform 2D operations between data sets
:param other: other data set
:param operation: function defining the operation
"""
# First, check the data compatibility
dy, dy_other = self._validity_check(other)
result = Data2D(image=None, qx_data=None, qy_data=None,
q_data=None, err_image=None, xmin=None, xmax=None,
ymin=None, ymax=None, zmin=None, zmax=None)
result.clone_without_data(len(self.data))
result.copy_from_datainfo(data2d=self)
result.xmin = self.xmin
result.xmax = self.xmax
result.ymin = self.ymin
result.ymax = self.ymax
if self.dqx_data is None or self.dqy_data is None:
result.dqx_data = None
result.dqy_data = None
else:
result.dqx_data = numpy.zeros(len(self.data))
result.dqy_data = numpy.zeros(len(self.data))
for i in range(numpy.size(self.data)):
result.data[i] = self.data[i]
if self.err_data is not None and \
numpy.size(self.data) == numpy.size(self.err_data):
result.err_data[i] = self.err_data[i]
if self.dqx_data is not None:
result.dqx_data[i] = self.dqx_data[i]
if self.dqy_data is not None:
result.dqy_data[i] = self.dqy_data[i]
result.qx_data[i] = self.qx_data[i]
result.qy_data[i] = self.qy_data[i]
result.q_data[i] = self.q_data[i]
result.mask[i] = self.mask[i]
a = Uncertainty(self.data[i], dy[i]**2)
if isinstance(other, Data2D):
b = Uncertainty(other.data[i], dy_other[i]**2)
if other.dqx_data is not None and \
result.dqx_data is not None:
result.dqx_data[i] *= self.dqx_data[i]
result.dqx_data[i] += (other.dqx_data[i]**2)
result.dqx_data[i] /= 2
result.dqx_data[i] = math.sqrt(result.dqx_data[i])
if other.dqy_data is not None and \
result.dqy_data is not None:
result.dqy_data[i] *= self.dqy_data[i]
result.dqy_data[i] += (other.dqy_data[i]**2)
result.dqy_data[i] /= 2
result.dqy_data[i] = math.sqrt(result.dqy_data[i])
else:
b = other
output = operation(a, b)
result.data[i] = output.x
result.err_data[i] = math.sqrt(math.fabs(output.variance))
return result
def _perform_union(self, other):
"""
Perform 2D operations between data sets
:param other: other data set
:param operation: function defining the operation
"""
# First, check the data compatibility
self._validity_check_union(other)
result = Data2D(image=None, qx_data=None, qy_data=None,
q_data=None, err_image=None, xmin=None, xmax=None,
ymin=None, ymax=None, zmin=None, zmax=None)
length = len(self.data)
tot_length = length + len(other.data)
result.clone_without_data(tot_length)
result.xmin = self.xmin
result.xmax = self.xmax
result.ymin = self.ymin
result.ymax = self.ymax
if self.dqx_data is None or self.dqy_data is None or \
other.dqx_data is None or other.dqy_data is None :
result.dqx_data = None
result.dqy_data = None
else:
result.dqx_data = numpy.zeros(len(self.data) + \
numpy.size(other.data))
result.dqy_data = numpy.zeros(len(self.data) + \
numpy.size(other.data))
result.data = numpy.append(self.data, other.data)
result.qx_data = numpy.append(self.qx_data, other.qx_data)
result.qy_data = numpy.append(self.qy_data, other.qy_data)
result.q_data = numpy.append(self.q_data, other.q_data)
result.mask = numpy.append(self.mask, other.mask)
if result.err_data is not None:
result.err_data = numpy.append(self.err_data, other.err_data)
if self.dqx_data is not None:
result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
if self.dqy_data is not None:
result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)
return result
def check_data_validity(data):
"""
    Return True if data is valid enough to compute chisqr, else False
"""
flag = True
if data is not None:
if issubclass(data.__class__, Data2D):
if (data.data is None) or (len(data.data) == 0)\
or (len(data.err_data) == 0):
flag = False
else:
if (data.y is None) or (len(data.y) == 0):
flag = False
if not data.is_data:
flag = False
else:
flag = False
return flag
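# Usage sketch (not from the sasview sources): check_data_validity() guards
# chi^2 computation.  Assuming the package imports above resolve:
if __name__ == "__main__":
    d = Data1D(x=[0.01, 0.02], y=[1.0, 0.5], dy=[0.1, 0.05])
    print(check_data_validity(d))   # True: y is populated and is_data is set
    d.is_data = False
    print(check_data_validity(d))   # False: synthetic data is rejected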
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/PlotterData.py
| 0.483405 | 0.379206 |
PlotterData.py
|
pypi
|
class Parameter(object):
"""
Class to handle model parameters - sets the parameters and their
initial value from the model based to it.
"""
def __init__(self, model, name, value=None):
self.model = model
self.name = name
if value is not None:
self.model.setParam(self.name, value)
def set(self, value):
"""
Set the value of the parameter
"""
self.model.setParam(self.name, value)
def __call__(self):
"""
Return the current value of the parameter
"""
return self.model.getParam(self.name)
def sasfit(model, pars, x, y, err_y, qmin=None, qmax=None):
"""
Fit function
:param model: sas model object
:param pars: list of parameters
:param x: vector of x data
:param y: vector of y data
:param err_y: vector of y errors
"""
    # Default to the full x-range when no explicit Q limits are given;
    # the original compared x against None, which fails on Python 3.
    if qmin is None:
        qmin = min(x)
    if qmax is None:
        qmax = max(x)
    def f(params):
"""
Calculates the vector of residuals for each point
in y for a given set of input parameters.
:param params: list of parameter values
:return: vector of residuals
"""
i = 0
for p in pars:
p.set(params[i])
i += 1
residuals = []
for j in range(len(x)):
if x[j] >= qmin and x[j] <= qmax:
residuals.append((y[j] - model.runXY(x[j])) / err_y[j])
return residuals
def chi2(params):
"""
Calculates chi^2
:param params: list of parameter values
:return: chi^2
"""
        total = 0
        res = f(params)
        for item in res:
            total += item * item
        return total
    # This import is slow, which is why it is done here rather than at the top of the file
from scipy.optimize import leastsq
p = [param() for param in pars]
out, cov_x, info, mesg, success = leastsq(f, p, full_output=1)
# Calculate chi squared
if len(pars) > 1:
chisqr = chi2(out)
elif len(pars) == 1:
chisqr = chi2([out])
return chisqr, out, cov_x
def calcCommandline(event):
# Testing implementation
# Fit a Line model
from .LineModel import LineModel
line = LineModel()
cstA = Parameter(line, 'A', event.cstA)
cstB = Parameter(line, 'B', event.cstB)
y = line.run()
chisqr, out, cov = sasfit(line, [cstA, cstB], event.x, y, 0)
# print "Output parameters:", out
print("The right answer is [70.0, 1.0]")
print(chisqr, out, cov)
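# Usage sketch (not from the sasview sources): sasfit() works with any object
# exposing setParam/getParam/runXY.  The toy model below is an assumption for
# illustration, not part of the package:
if __name__ == "__main__":
    import numpy
    class _ToyLine(object):
        """Minimal stand-in exposing the interface sasfit() needs."""
        def __init__(self):
            self.params = {'A': 2.0, 'B': 0.0}
        def setParam(self, name, value):
            self.params[name.upper()] = value
        def getParam(self, name):
            return self.params[name.upper()]
        def runXY(self, x):
            return self.params['A'] * x + self.params['B']
    line = _ToyLine()
    pars = [Parameter(line, 'A', 2.0), Parameter(line, 'B', 0.0)]
    x = numpy.linspace(1.0, 10.0, 50)
    y = 70.0 * x + 1.0
    err_y = numpy.ones_like(y)
    chisqr, out, cov = sasfit(line, pars, x, y, err_y, qmin=x.min(), qmax=x.max())
    print(chisqr, out)   # `out` should approach [70.0, 1.0]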
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Fittings.py
| 0.798501 | 0.601301 |
Fittings.py
|
pypi
|
import numpy
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets, QtPrintSupport
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rcParams
from packaging import version
DEFAULT_CMAP = mpl.cm.jet
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Plotting.ScaleProperties import ScaleProperties
from sas.qtgui.Plotting.WindowTitle import WindowTitle
from sas.qtgui.Plotting.Binder import BindArtist
import sas.qtgui.Utilities.GuiUtils as GuiUtils
import sas.qtgui.Plotting.PlotHelper as PlotHelper
class PlotterBase(QtWidgets.QWidget):
def __init__(self, parent=None, manager=None, quickplot=False):
super(PlotterBase, self).__init__(parent)
# Required for the communicator
self.manager = manager
self.quickplot = quickplot
# Set auto layout so x/y axis captions don't get cut off
rcParams.update({'figure.autolayout': True})
#plt.style.use('ggplot')
#plt.style.use('seaborn-darkgrid')
# a figure instance to plot on
self.figure = plt.figure()
# Define canvas for the figure to be placed on
self.canvas = FigureCanvas(self.figure)
# Simple window for data display
self.txt_widget = QtWidgets.QTextEdit(None)
# Set the layout and place the canvas widget in it.
layout = QtWidgets.QVBoxLayout()
        # Qt5: setMargin was replaced by setContentsMargins (four arguments)
        layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.canvas)
# 1D plotter defaults
self.current_plot = 111
self._data = [] # Original 1D/2D object
self._xscale = 'log'
self._yscale = 'log'
self.qx_data = []
self.qy_data = []
self.color = 0
self.symbol = 0
self.grid_on = False
self.scale = 'linear'
self.x_label = "log10(x)"
self.y_label = "log10(y)"
# Mouse click related
self._scale_xlo = None
self._scale_xhi = None
self._scale_ylo = None
self._scale_yhi = None
self.x_click = None
self.y_click = None
self.event_pos = None
self.leftdown = False
self.gotLegend = 0
self.show_legend = True
# Annotations
self.selectedText = None
self.textList = []
# Create Artist and bind it
self.connect = BindArtist(self.figure)
# Pre-define the Scale properties dialog
self.properties = ScaleProperties(self,
init_scale_x=self.x_label,
init_scale_y=self.y_label)
# default color map
self.cmap = DEFAULT_CMAP
# Add the axes object -> subplot
# TODO: self.ax will have to be tracked and exposed
# to enable subplot specific operations
self.ax = self.figure.add_subplot(self.current_plot)
        # TODO: remove this alias once subplot-specific handling is in place
        self.axes = [self.ax]
# Set the background color to white
self.canvas.figure.set_facecolor('#FFFFFF')
# Canvas event handlers
self.canvas.mpl_connect('button_release_event', self.onMplMouseUp)
self.canvas.mpl_connect('button_press_event', self.onMplMouseDown)
self.canvas.mpl_connect('motion_notify_event', self.onMplMouseMotion)
self.canvas.mpl_connect('pick_event', self.onMplPick)
self.canvas.mpl_connect('scroll_event', self.onMplWheel)
self.contextMenu = QtWidgets.QMenu(self)
self.toolbar = NavigationToolbar(self.canvas, self)
        self.canvas.mpl_connect('resize_event', self.onResize)
layout.addWidget(self.toolbar)
        if not quickplot:
            # The full toolbar is hidden for the time being
            self.toolbar.hide()
            # Notify PlotHelper about the new plot
            self.updatePlotHelper()
        else:
            self.toolbar.hide()
self.setLayout(layout)
@property
def data(self):
""" data getter """
return self._data
@data.setter
def data(self, data=None):
""" Pure virtual data setter """
raise NotImplementedError("Data setter must be implemented in derived class.")
def title(self, title=""):
""" title setter """
self._title = title
# Set the object name to satisfy the Squish object picker
self.canvas.setObjectName(title)
@property
def item(self):
''' getter for this plot's QStandardItem '''
return self._item
@item.setter
def item(self, item=None):
''' setter for this plot's QStandardItem '''
self._item = item
    @property
    def xLabel(self):
        """ x-label getter """
        return self.x_label
    @xLabel.setter
    def xLabel(self, xlabel=""):
        """ x-label setter """
        self.x_label = r'$%s$' % xlabel if xlabel else ""
    @property
    def yLabel(self):
        """ y-label getter """
        return self.y_label
    @yLabel.setter
    def yLabel(self, ylabel=""):
        """ y-label setter """
        self.y_label = r'$%s$' % ylabel if ylabel else ""
@property
def yscale(self):
""" Y-axis scale getter """
return self._yscale
@yscale.setter
def yscale(self, scale='linear'):
""" Y-axis scale setter """
if version.parse(mpl.__version__) < version.parse("3.3"):
self.ax.set_yscale(scale, nonposy='clip') if scale != 'linear' else self.ax.set_yscale(scale)
else:
self.ax.set_yscale(scale, nonpositive='clip') if scale != 'linear' else self.ax.set_yscale(scale)
self._yscale = scale
@property
def xscale(self):
""" X-axis scale getter """
return self._xscale
@xscale.setter
def xscale(self, scale='linear'):
""" X-axis scale setter """
self.ax.cla()
if version.parse(mpl.__version__) < version.parse("3.3"):
self.ax.set_xscale(scale, nonposx='clip') if scale != 'linear' else self.ax.set_xscale(scale)
else:
self.ax.set_xscale(scale, nonpositive='clip') if scale != 'linear' else self.ax.set_xscale(scale)
self._xscale = scale
@property
def showLegend(self):
""" Legend visibility getter """
return self.show_legend
@showLegend.setter
def showLegend(self, show=True):
""" Legend visibility setter """
self.show_legend = show
def update(self):
self.figure.canvas.draw()
def draw(self):
self.figure.canvas.draw()
    def updatePlotHelper(self):
"""
Notify the plot helper about the new plot
"""
# Notify the helper
PlotHelper.addPlot(self)
# Notify the listeners about a new graph
self.manager.communicator.activeGraphsSignal.emit([self, False])
def defaultContextMenu(self):
"""
Content of the dialog-universal context menu:
Save, Print and Copy
"""
# Actions
self.contextMenu.clear()
self.actionSaveImage = self.contextMenu.addAction("Save Image")
self.actionPrintImage = self.contextMenu.addAction("Print Image")
self.actionCopyToClipboard = self.contextMenu.addAction("Copy to Clipboard")
self.contextMenu.addSeparator()
# Define the callbacks
self.actionSaveImage.triggered.connect(self.onImageSave)
self.actionPrintImage.triggered.connect(self.onImagePrint)
self.actionCopyToClipboard.triggered.connect(self.onClipboardCopy)
def createContextMenu(self):
"""
Define common context menu and associated actions for the MPL widget
"""
raise NotImplementedError("Context menu method must be implemented in derived class.")
def createContextMenuQuick(self):
"""
Define context menu and associated actions for the quickplot MPL widget
"""
raise NotImplementedError("Context menu method must be implemented in derived class.")
def onResize(self, event):
"""
Redefine default resize event
"""
pass
def contextMenuEvent(self, event):
"""
Display the context menu
"""
if not self.quickplot:
self.createContextMenu()
else:
self.createContextMenuQuick()
event_pos = event.pos()
self.contextMenu.exec_(self.canvas.mapToGlobal(event_pos))
def onMplMouseUp(self, event):
"""
Mouse button up callback
"""
pass
def onMplMouseDown(self, event):
"""
Mouse button down callback
"""
pass
def onMplMouseMotion(self, event):
"""
Mouse motion callback
"""
pass
def onMplPick(self, event):
"""
Mouse pick callback
"""
pass
def onMplWheel(self, event):
"""
Mouse wheel scroll callback
"""
pass
def clean(self):
"""
Redraw the graph
"""
self.figure.delaxes(self.ax)
self.ax = self.figure.add_subplot(self.current_plot)
def plot(self, marker=None, linestyle=None):
"""
PURE VIRTUAL
Plot the content of self._data
"""
raise NotImplementedError("Plot method must be implemented in derived class.")
def closeEvent(self, event):
"""
Overwrite the close event adding helper notification
"""
self.clearQRangeSliders()
        # Deregister this plot from the PlotHelper registry
PlotHelper.deletePlot(PlotHelper.idOfPlot(self))
# Notify the listeners
self.manager.communicator.activeGraphsSignal.emit([self, True])
event.accept()
def clearQRangeSliders(self):
# Destroy the Q-range sliders in 1D plots
if hasattr(self, 'sliders') and isinstance(self.sliders, dict):
for slider in self.sliders.values():
slider.clear()
self.sliders = {}
def onImageSave(self):
"""
Use the internal MPL method for saving to file
"""
if not hasattr(self, "toolbar"):
self.toolbar = NavigationToolbar(self.canvas, self)
self.toolbar.save_figure()
def onImagePrint(self):
"""
Display printer dialog and print the MPL widget area
"""
# Define the printer
printer = QtPrintSupport.QPrinter()
# Display the print dialog
dialog = QtPrintSupport.QPrintDialog(printer)
dialog.setModal(True)
dialog.setWindowTitle("Print")
if dialog.exec_() != QtWidgets.QDialog.Accepted:
return
painter = QtGui.QPainter(printer)
# Grab the widget screenshot
pmap = QtGui.QPixmap(self.size())
self.render(pmap)
# Create a label with pixmap drawn
printLabel = QtWidgets.QLabel()
printLabel.setPixmap(pmap)
# Print the label
printLabel.render(painter)
painter.end()
def onClipboardCopy(self):
"""
Copy MPL widget area to buffer
"""
bmp = QtWidgets.QApplication.clipboard()
pixmap = QtGui.QPixmap(self.canvas.size())
self.canvas.render(pixmap)
bmp.setPixmap(pixmap)
def onGridToggle(self):
"""
Add/remove grid lines from MPL plot
"""
self.grid_on = (not self.grid_on)
self.ax.grid(self.grid_on)
self.canvas.draw_idle()
def onWindowsTitle(self):
"""
Show a dialog allowing chart title customisation
"""
current_title = self.windowTitle()
titleWidget = WindowTitle(self, new_title=current_title)
result = titleWidget.exec_()
if result != QtWidgets.QDialog.Accepted:
return
title = titleWidget.title()
self.setWindowTitle(title)
# Notify the listeners about a new graph title
self.manager.communicator.activeGraphName.emit((current_title, title))
def onToggleMenu(self):
"""
Toggle navigation menu visibility in the chart
"""
if self.toolbar.isVisible():
self.toolbar.hide()
else:
self.toolbar.show()
def offset_graph(self):
"""
Zoom and offset the graph to the last known settings
"""
for ax in self.axes:
if self._scale_xhi is not None and self._scale_xlo is not None:
ax.set_xlim(self._scale_xlo, self._scale_xhi)
if self._scale_yhi is not None and self._scale_ylo is not None:
ax.set_ylim(self._scale_ylo, self._scale_yhi)
def onDataInfo(self, plot_data):
"""
Displays data info text window for the selected plot
"""
if isinstance(plot_data, Data1D):
text_to_show = GuiUtils.retrieveData1d(plot_data)
else:
text_to_show = GuiUtils.retrieveData2d(plot_data)
# Hardcoded sizes to enable full width rendering with default font
self.txt_widget.resize(420,600)
self.txt_widget.clear()
self.txt_widget.setReadOnly(True)
self.txt_widget.setWindowFlags(QtCore.Qt.Window)
self.txt_widget.setWindowIcon(QtGui.QIcon(":/res/ball.ico"))
self.txt_widget.setWindowTitle("Data Info: %s" % plot_data.filename)
self.txt_widget.insertPlainText(text_to_show)
self.txt_widget.show()
# Move the slider all the way up, if present
vertical_scroll_bar = self.txt_widget.verticalScrollBar()
vertical_scroll_bar.triggerAction(QtWidgets.QScrollBar.SliderToMinimum)
def onSavePoints(self, plot_data):
"""
Saves plot data to a file
"""
if isinstance(plot_data, Data1D):
GuiUtils.saveData1D(plot_data)
else:
GuiUtils.saveData2D(plot_data)
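# Usage sketch (not from the sasview sources): PlotterBase is abstract, so a
# concrete plotter supplies the data setter, plot() and both context-menu
# builders.  Assumes a running QApplication; quickplot=True sidesteps the
# PlotHelper/manager wiring:
class _MinimalPlotter(PlotterBase):
    @PlotterBase.data.setter
    def data(self, value=None):
        self._data = value
    def plot(self, marker=None, linestyle=None):
        self.ax.plot([0, 1], [0, 1], marker=marker or 'o')
        self.canvas.draw_idle()
    def createContextMenu(self):
        self.defaultContextMenu()
    def createContextMenuQuick(self):
        self.defaultContextMenu()
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    w = _MinimalPlotter(quickplot=True)
    w.plot()
    w.show()
    sys.exit(app.exec_())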
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/PlotterBase.py
| 0.544801 | 0.209692 |
PlotterBase.py
|
pypi
|
from PyQt5 import QtGui
from PyQt5 import QtCore
import sas.qtgui.Utilities.GuiUtils as GuiUtils
class SlicerModel(object):
def __init__(self):
# Model representation of local parameters
self._model = QtGui.QStandardItemModel()
self.update_model = True
self._model.itemChanged.connect(self.setParamsFromModelItem)
def setModelFromParams(self):
"""
Set up the Qt model for data handling between controls
"""
parameters = self.getParams()
self._model.removeRows(0, self._model.rowCount())
        # Create/overwrite model items
for parameter in list(parameters.keys()):
item1 = QtGui.QStandardItem(parameter)
            if isinstance(parameters[parameter], bool):
                # The checkbox state carries the boolean value; QStandardItem
                # has no bool overload, so create an empty item.
                item2 = QtGui.QStandardItem()
                item2.setCheckable(True)
                item2.setCheckState(QtCore.Qt.Checked if parameters[parameter] else QtCore.Qt.Unchecked)
else:
item2 = QtGui.QStandardItem(GuiUtils.formatNumber(parameters[parameter]))
self._model.appendRow([item1, item2])
self._model.setHeaderData(0, QtCore.Qt.Horizontal, "Parameter")
self._model.setHeaderData(1, QtCore.Qt.Horizontal, "Value")
def setParamsFromModel(self):
"""
Set up the params dictionary based on the current model content.
"""
params = self.getParams()
        for row_index in range(self._model.rowCount()):
            param_name = str(self._model.item(row_index, 0).text())
            if self._model.item(row_index, 1).isCheckable():
                params[param_name] = (self._model.item(row_index, 1).checkState() == QtCore.Qt.Checked)
            else:
                params[param_name] = float(self._model.item(row_index, 1).text())
self.update_model = False
self.setParams(params)
self.update_model = True
def setParamsFromModelItem(self, item):
"""
Set up the params dictionary for the parameter in item.
"""
params = self.getParams()
index = self._model.indexFromItem(item)
row_index = index.row()
param_name = str(self._model.item(row_index, 0).text())
if self._model.item(row_index, 1).isCheckable():
            params[param_name] = (self._model.item(row_index, 1).checkState() == QtCore.Qt.Checked)
else:
params[param_name] = float(self._model.item(row_index, 1).text())
self.update_model = False
self.setParams(params)
self.update_model = True
def model(self):
'''getter for the model'''
return self._model
def getParams(self):
''' pure virtual '''
raise NotImplementedError("Parameter getter must be implemented in derived class.")
    def setParams(self, params):
''' pure virtual '''
raise NotImplementedError("Parameter setter must be implemented in derived class.")
def validate(self):
''' pure virtual '''
raise NotImplementedError("Validator must be implemented in derived class.")
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/SlicerModel.py
| 0.639849 | 0.154695 |
SlicerModel.py
|
pypi
|
import math
def toX(x, y=None):
"""
This function is used to load value on Plottable.View
:param x: Float value
:return: x
"""
return x
def toX_pos(x, y=None):
"""
This function is used to load value on Plottable.View
:param x: Float value
:return: x
"""
if not x > 0:
raise ValueError("Transformation only accepts positive values.")
else:
return x
def toX2(x, y=None):
"""
This function is used to load value on Plottable.View
Calculate x^(2)
:param x: float value
"""
return x * x
def fromX2(x, y=None):
"""
This function is used to load value on Plottable.View
Calculate square root of x
:param x: float value
"""
if not x >= 0:
raise ValueError("square root of a negative value ")
else:
return math.sqrt(x)
def toX4(x, y=None):
"""
This function is used to load value on Plottable.View
Calculate x^(4)
:param x: float value
"""
return x * x * x * x
def fromX4(x, y=None):
"""
This function is used to load value on Plottable.View
Calculate square root of x
:param x: float value
"""
if not x >= 0:
raise ValueError("double square root of a negative value ")
else:
return math.sqrt(math.sqrt(x))
def toLogX(x, y=None):
"""
This function is used to load value on Plottable.View
calculate log x
:param x: float value
"""
if not x > 0:
raise ValueError("Log(x)of a negative value ")
else:
return math.log(x)
def toOneOverX(x, y=None):
"""
"""
if x != 0:
return 1 / x
else:
raise ValueError("cannot divide by zero")
def toOneOverSqrtX(y, x=None):
"""
"""
if y > 0:
return 1 / math.sqrt(y)
else:
raise ValueError("transform.toOneOverSqrtX: cannot be computed")
def toLogYX2(y, x):
"""
"""
if (y * (x ** 2)) > 0:
return math.log(y * (x ** 2))
else:
raise ValueError("transform.toLogYX2: cannot be computed")
def toLogYX4(y, x):
"""
"""
if (math.pow(x, 4) * y) > 0:
return math.log(math.pow(x, 4) * y)
else:
raise ValueError("transform.toLogYX4: input error")
def toYX4(y, x):
"""
"""
return math.pow(x, 4) * y
def toYX2(y, x):
"""
"""
return math.pow(x, 2) * y
def toLogXY(y, x):
"""
This function is used to load value on Plottable.View
calculate log x
:param x: float value
"""
if not (x * y) > 0:
raise ValueError("Log(X*Y)of a negative value ")
else:
return math.log(x * y)
def errToX(x, y=None, dx=None, dy=None):
"""
calculate error of x**2
:param x: float value
:param dx: float value
"""
if dx is None:
dx = 0
return dx
def errToX_pos(x, y=None, dx=None, dy=None):
"""
calculate error of x**2
:param x: float value
:param dx: float value
"""
if dx is None:
dx = 0
return dx
def errToX2(x, y=None, dx=None, dy=None):
"""
calculate error of x**2
:param x: float value
:param dx: float value
"""
if dx is not None:
err = 2 * x * dx
return math.fabs(err)
else:
return 0.0
def errFromX2(x, y=None, dx=None, dy=None):
"""
calculate error of sqrt(x)
:param x: float value
:param dx: float value
"""
if x > 0:
if dx is not None:
err = dx / (2 * math.sqrt(x))
else:
err = 0
return math.fabs(err)
else:
msg = "transform.errFromX2: can't compute error of negative x"
raise ValueError(msg)
def errToX4(x, y=None, dx=None, dy=None):
"""
calculate error of x**4
:param x: float value
:param dx: float value
"""
if dx is not None:
err = 4 * math.pow(x, 3) * dx
return math.fabs(err)
else:
return 0.0
def errFromX4(x, y=None, dx=None, dy=None):
"""
calculate error of x^1/4
:param x: float value
:param dx: float value
"""
if x > 0:
if dx is not None:
err = dx / (4 * math.pow(x, 3 / 4))
else:
err = 0
return math.fabs(err)
else:
msg = "transform.errFromX4: can't compute error of negative x"
raise ValueError(msg)
def errToLog10X(x, y=None, dx=None, dy=None):
"""
calculate error of Log(x)
:param x: float value
:param dx: float value
"""
if dx is None:
dx = 0
# Check that the point on the graph is positive
# within errors
    if not (x - dx) > 0:
        msg = "Transformation does not accept"
        msg += " points that are consistent with zero."
        raise ValueError(msg)
    if x != 0:
        dx = dx / (x * math.log(10))
    else:
        raise ValueError("errToLog10X: divide by zero")
return dx
def errToLogX(x, y=None, dx=None, dy=None):
"""
calculate error of Log(x)
:param x: float value
:param dx: float value
"""
if dx is None:
dx = 0
    # Check that the x point on the graph is not zero
if x != 0:
dx = dx / x
else:
raise ValueError("errToLogX: divide by zero")
return dx
def errToYX2(y, x, dy=None, dx=None):
"""
"""
if dx is None:
dx = 0
if dy is None:
dy = 0
err = math.sqrt((2 * x * y * dx) ** 2 + ((x ** 2) * dy) ** 2)
return err
def errToLogXY(x, y, dx=None, dy=None):
    """
    calculate error of Log(xy)
    """
    if dx is None:
        dx = 0
    if dy is None:
        dy = 0
    # Check that the point on the graph is positive
    # within errors (the None defaults must be set first)
    if not (x - dx) > 0 or not (y - dy) > 0:
        msg = "Transformation does not accept points"
        msg += " that are consistent with zero."
        raise ValueError(msg)
    if x != 0 and y != 0:
        err = (dx / x) ** 2 + (dy / y) ** 2
    else:
        raise ValueError("cannot compute this error")
    return math.sqrt(math.fabs(err))
def errToLogYX2(y, x, dy=None, dx=None):
    """
    calculate error of Log(yx**2)
    """
    if dx is None:
        dx = 0
    if dy is None:
        dy = 0
    # Check that the point on the graph is positive
    # within errors (the None defaults must be set first)
    if not (x - dx) > 0 or not (y - dy) > 0:
        msg = "Transformation does not accept points"
        msg += " that are consistent with zero."
        raise ValueError(msg)
    if x > 0 and y > 0:
        err = (2.0 * dx / x) ** 2 + (dy / y) ** 2
    else:
        raise ValueError("cannot compute this error")
    return math.sqrt(math.fabs(err))
def errOneOverX(x, y=None, dx=None, dy=None):
"""
calculate error on 1/x
"""
if x != 0:
if dx is None:
dx = 0
err = dx / x ** 2
else:
raise ValueError("Cannot compute this error")
return math.fabs(err)
def errOneOverSqrtX(x, y=None, dx=None, dy=None):
"""
Calculate error on 1/sqrt(x)
"""
if x > 0:
if dx is None:
dx = 0
err = -1 / 2 * math.pow(x, -3.0 / 2.0) * dx
else:
raise ValueError("Cannot compute this error")
return math.fabs(err)
def errToLogYX4(y, x, dy=None, dx=None):
"""
error for ln(y*x^(4))
:param x: float value
"""
    if dx is None:
        dx = 0
    if dy is None:
        dy = 0
    # Check that the point on the graph is positive
    # within errors (the None defaults must be set first)
    if (not (x - dx) > 0) or (not (y - dy) > 0):
        msg = "Transformation does not accept points"
        msg += " that are consistent with zero."
        raise ValueError(msg)
err = math.sqrt((4.0 * dx / x) ** 2 + (dy / y) ** 2)
return err
def errToYX4(y, x, dy=None, dx=None):
"""
error for (y*x^(4))
:param x: float value
"""
if dx is None:
dx = 0
if dy is None:
dy = 0
err = math.sqrt((dy * pow(x, 4)) ** 2 + (4 * y * dx * math.pow(x, 3)) ** 2)
return err
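# Usage sketch (not from the sasview sources): each toXxx() transform has a
# matching errXxx() that propagates a 1-sigma uncertainty through the same
# mapping, e.g. y' = ln(x) gives dy' = dx / x:
if __name__ == "__main__":
    x, dx = 0.05, 0.001
    print(toLogX(x))              # ln(0.05) ~ -3.0
    print(errToLogX(x, dx=dx))    # 0.001 / 0.05 = 0.02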
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/DataTransform.py
| 0.797083 | 0.840652 |
DataTransform.py
|
pypi
|
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from sas.qtgui.Plotting.PlotUtilities import COLORS, SHAPES
from sas.qtgui.Plotting.UI.PlotPropertiesUI import Ui_PlotPropertiesUI
class PlotProperties(QtWidgets.QDialog, Ui_PlotPropertiesUI):
""" Dialog for modification of single plot properties """
def __init__(self,
parent=None,
color=0,
marker=0,
marker_size=5,
legend=""):
super(PlotProperties, self).__init__(parent)
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.setFixedSize(self.minimumSizeHint())
self._parent = parent
self._marker = marker if marker else 0
self._color = color if color else 0
self._legend = legend
self._markersize = marker_size if marker_size else 3
        self.custom_color = not isinstance(self._color, int)
# Fill out the color combobox
self.cbColor.addItems(list(COLORS.keys())[:-1])
# data1d.custom_color can now be a simple integer,
# specifying COLORS dict index or a string containing
# the hex RGB value, e.g. #00FF00
if isinstance(self._color, int):
self.cbColor.setCurrentIndex(self._color)
else:
# Need the Custom entry here. "Custom" is always last.
self.cbColor.addItems([list(COLORS.keys())[-1]])
self.cbColor.setCurrentIndex(list(COLORS.keys()).index("Custom"))
# Fill out the marker combobox
self.cbShape.addItems(list(SHAPES.keys()))
try:
self.cbShape.setCurrentIndex(self._marker)
except TypeError:
marker_index = self.cbShape.findText(self._marker)
self.cbShape.setCurrentIndex(marker_index)
if self._legend:
self.txtLegend.setText(self._legend)
self.sbSize.setValue(self._markersize)
# Connect slots
self.cmdCustom.clicked.connect(self.onColorChange)
self.cbColor.currentIndexChanged.connect(self.onColorIndexChange)
def legend(self):
''' return current legend '''
return str(self.txtLegend.text())
def marker(self):
''' return the current shape index in SHAPE '''
return self.cbShape.currentIndex()
def markersize(self):
''' return marker size (int) '''
return self.sbSize.value()
def color(self):
''' return current color: index in COLORS or a hex string '''
if self.custom_color:
return self._color
else:
return self.cbColor.currentIndex()
def onColorChange(self):
"""
Pop up the standard Qt color change dialog
"""
# Pick up the chosen color
proposed_color = QtWidgets.QColorDialog.getColor(parent=self)
# Update the text control
if proposed_color.isValid():
# Block currentIndexChanged
self.cbColor.blockSignals(True)
# Add Custom to the color combo box
self.cbColor.addItems(["Custom"])
self.cbColor.setCurrentIndex(list(COLORS.keys()).index("Custom"))
# unblock currentIndexChanged
self.cbColor.blockSignals(False)
# Save the color as #RRGGBB
self.custom_color = True
self._color = str(proposed_color.name())
def onColorIndexChange(self):
"""
Dynamically add/remove "Custom" color index
"""
# Changed index - assure Custom is deleted
custom_index = self.cbColor.findText("Custom")
self.custom_color = False
if custom_index > -1:
self.cbColor.removeItem(custom_index)
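# Usage sketch (not from the sasview sources): typical round trip through the
# dialog.  Assumes a running QApplication and the generated UI form:
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dlg = PlotProperties(None, color=1, marker=2, marker_size=7, legend="fit")
    if dlg.exec_() == QtWidgets.QDialog.Accepted:
        print(dlg.color(), dlg.marker(), dlg.markersize(), dlg.legend())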
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/PlotProperties.py
| 0.749271 | 0.162247 |
PlotProperties.py
|
pypi
|
import math
class LineModel(object):
"""
Class that evaluates a linear model.
$f(x) = Ax + B$
List of default parameters:
A = 1.0
B = 1.0
"""
def __init__(self):
""" Initialization """
        # Name of the model
        self.name = "LineModel"
        # Define parameters
        self.params = {}
        self.params['A'] = 1.0
        self.params['B'] = 1.0
        # Parameter details [units, min, max]
self.details = {}
self.details['A'] = ['', None, None]
self.details['B'] = ['', None, None]
def getParam(self, name):
"""
Return parameter value
"""
return self.params[name.upper()]
def setParam(self, name, value):
"""
Set parameter value
"""
self.params[name.upper()] = value
def _line(self, x):
"""
Evaluate the function
:param x: x-value
:return: function value
"""
return (self.params['A'] * x) + self.params['B']
def run(self, x=0.0):
"""
Evaluate the model
:param x: simple value
:return: (Line value)
.. note::
This is the function called by fitDialog to calculate the
the y(xmin) and y(xmax), but the only difference between this and
runXY is when the if statement is true. I however cannot see what that
function is for. It needs to be documented here or removed.
-PDB 7/10/16
"""
if x.__class__.__name__ == 'list':
return self._line(x[0] * math.cos(x[1])) * \
self._line(x[0] * math.sin(x[1]))
elif x.__class__.__name__ == 'tuple':
msg = "Tuples are not allowed as input to BaseComponent models"
raise ValueError(msg)
else:
return self._line(x)
def runXY(self, x=0.0):
"""
Evaluate the model.
:param x: simple value
:return: Line value
..note::
This is to be what is called by fitDialog for the actual fit
but the only difference between this and run is when the if
statement is true. I however cannot see what that function
is for. It needs to be documented here or removed. -PDB 7/10/16
"""
if x.__class__.__name__ == 'list':
return self._line(x[0]) * self._line(x[1])
elif x.__class__.__name__ == 'tuple':
msg = "Tuples are not allowed as input to BaseComponent models"
raise ValueError(msg)
else:
return self._line(x)
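# Usage sketch (not from the sasview sources): parameter names are handled
# case-insensitively because get/setParam upper-case the key:
if __name__ == "__main__":
    m = LineModel()
    m.setParam('a', 70.0)
    m.setParam('b', 1.0)
    print(m.runXY(2.0))        # 70*2 + 1 = 141.0
    print(m.run([2.0, 0.0]))   # line(2*cos 0) * line(2*sin 0) = 141.0 * 1.0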
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/LineModel.py
| 0.741674 | 0.421254 |
LineModel.py
|
pypi
|
from functools import partial
import copy
import numpy as np
from PyQt5 import QtWidgets, QtCore
from sas.qtgui.Plotting.PlotterData import Data2D
# Local UI
from sas.qtgui.UI import main_resources_rc
from sas.qtgui.Plotting.UI.MaskEditorUI import Ui_MaskEditorUI
from sas.qtgui.Plotting.Plotter2D import Plotter2DWidget
from sas.qtgui.Plotting.Masks.SectorMask import SectorMask
from sas.qtgui.Plotting.Masks.BoxMask import BoxMask
from sas.qtgui.Plotting.Masks.CircularMask import CircularMask
class MaskEditor(QtWidgets.QDialog, Ui_MaskEditorUI):
def __init__(self, parent=None, data=None):
super(MaskEditor, self).__init__()
assert isinstance(data, Data2D)
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.data = data
self.parent = parent
filename = data.name
self.current_slicer = None
self.slicer_mask = None
self.setWindowTitle("Mask Editor for %s" % filename)
self.plotter = Plotter2DWidget(self, manager=parent, quickplot=True)
self.plotter.data = self.data
self.slicer_z = 0
self.default_mask = copy.deepcopy(data.mask)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.frame.setLayout(layout)
self.plotter.plot()
layout.addWidget(self.plotter)
self.subplot = self.plotter.ax
# update mask
self.updateMask(self.default_mask)
self.initializeSignals()
def initializeSignals(self):
"""
Attach slots to signals from radio boxes
"""
self.rbWings.toggled.connect(partial(self.onMask, slicer=SectorMask, inside=True))
self.rbCircularDisk.toggled.connect(partial(self.onMask, slicer=CircularMask, inside=True))
self.rbRectangularDisk.toggled.connect(partial(self.onMask, slicer=BoxMask, inside=True))
self.rbDoubleWingWindow.toggled.connect(partial(self.onMask, slicer=SectorMask, inside=False))
self.rbCircularWindow.toggled.connect(partial(self.onMask, slicer=CircularMask, inside=False))
self.rbRectangularWindow.toggled.connect(partial(self.onMask, slicer=BoxMask, inside=False))
# Button groups defined so we can uncheck all buttons programmatically
self.buttonGroup = QtWidgets.QButtonGroup()
self.buttonGroup.addButton(self.rbWings)
self.buttonGroup.addButton(self.rbCircularDisk)
self.buttonGroup.addButton(self.rbRectangularDisk)
self.buttonGroup.addButton(self.rbDoubleWingWindow)
self.buttonGroup.addButton(self.rbCircularWindow)
self.buttonGroup.addButton(self.rbRectangularWindow)
# Push buttons
self.cmdAdd.clicked.connect(self.onAdd)
self.cmdReset.clicked.connect(self.onReset)
self.cmdClear.clicked.connect(self.onClear)
def emptyRadioButtons(self):
"""
Uncheck all buttons without them firing signals causing unnecessary slicer updates
"""
self.buttonGroup.setExclusive(False)
self.rbWings.blockSignals(True)
self.rbWings.setChecked(False)
self.rbWings.blockSignals(False)
self.rbCircularDisk.blockSignals(True)
self.rbCircularDisk.setChecked(False)
self.rbCircularDisk.blockSignals(False)
self.rbRectangularDisk.blockSignals(True)
self.rbRectangularDisk.setChecked(False)
self.rbRectangularDisk.blockSignals(False)
self.rbDoubleWingWindow.blockSignals(True)
self.rbDoubleWingWindow.setChecked(False)
self.rbDoubleWingWindow.blockSignals(False)
self.rbCircularWindow.blockSignals(True)
self.rbCircularWindow.setChecked(False)
self.rbCircularWindow.blockSignals(False)
self.rbRectangularWindow.blockSignals(True)
self.rbRectangularWindow.setChecked(False)
self.rbRectangularWindow.blockSignals(False)
self.buttonGroup.setExclusive(True)
def setSlicer(self, slicer):
"""
Clear the previous slicer and create a new one.
slicer: slicer class to create
"""
# Clear current slicer
if self.current_slicer is not None:
self.current_slicer.clear()
# Create a new slicer
self.slicer_z += 1
        # The axes and figure live on the embedded plotter widget
        self.current_slicer = slicer(self, self.plotter.ax, zorder=self.slicer_z)
        self.plotter.ax.set_ylim(self.data.ymin, self.data.ymax)
        self.plotter.ax.set_xlim(self.data.xmin, self.data.xmax)
        # Draw slicer
        self.plotter.figure.canvas.draw()
self.current_slicer.update()
def onMask(self, slicer=None, inside=True):
"""
Clear the previous mask and create a new one.
"""
self.clearSlicer()
# modifying data in-place
self.slicer_z += 1
self.current_slicer = slicer(self.plotter, self.plotter.ax, zorder=self.slicer_z, side=inside)
self.plotter.ax.set_ylim(self.data.ymin, self.data.ymax)
self.plotter.ax.set_xlim(self.data.xmin, self.data.xmax)
self.plotter.canvas.draw()
self.slicer_mask = self.current_slicer.update()
def update(self):
"""
Redraw the canvas
"""
self.plotter.draw()
def onAdd(self):
"""
Generate required mask and modify underlying DATA
"""
if self.current_slicer is None:
return
        data = self.data
self.slicer_mask = self.current_slicer.update()
data.mask = self.data.mask & self.slicer_mask
self.updateMask(data.mask)
self.emptyRadioButtons()
def onClear(self):
"""
Remove the current mask(s)
"""
self.slicer_z += 1
self.clearSlicer()
self.current_slicer = BoxMask(self.plotter, self.plotter.ax,
zorder=self.slicer_z, side=True)
self.plotter.ax.set_ylim(self.data.ymin, self.data.ymax)
self.plotter.ax.set_xlim(self.data.xmin, self.data.xmax)
self.data.mask = copy.deepcopy(self.default_mask)
# update mask plot
self.updateMask(self.data.mask)
self.emptyRadioButtons()
def onReset(self):
"""
Removes all the masks from data
"""
self.slicer_z += 1
self.clearSlicer()
self.current_slicer = BoxMask(self.plotter, self.plotter.ax,
zorder=self.slicer_z, side=True)
self.plotter.ax.set_ylim(self.data.ymin, self.data.ymax)
self.plotter.ax.set_xlim(self.data.xmin, self.data.xmax)
mask = np.ones(len(self.data.mask), dtype=bool)
self.data.mask = mask
# update mask plot
self.updateMask(mask)
self.emptyRadioButtons()
def clearSlicer(self):
"""
Clear the slicer on the plot
"""
if self.current_slicer is None:
return
self.current_slicer.clear()
self.plotter.draw()
self.current_slicer = None
def updateMask(self, mask):
"""
Respond to changes in masking
"""
        # the case of a small number of True points
        if len(mask[mask]) < 10 and self.data is not None:
            self.data.mask = copy.deepcopy(self.default_mask)
        else:
            self.default_mask = mask
        # Make temporary data to plot: masked-out points are set to NaN so
        # they can be distinguished from data points that are exactly 0.
        temp_data = copy.deepcopy(self.data)
        temp_data.data[~mask] = np.nan
if self.current_slicer is not None:
self.current_slicer.clear()
self.current_slicer = None
# modify imshow data
self.plotter.plot(data=temp_data, update=True)
self.plotter.draw()
self.subplot = self.plotter.ax
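# Sketch (not from the sasview sources): onAdd() composes masks with a logical
# AND, so a point survives only if every applied slicer keeps it.  The same
# composition in plain numpy, independent of the GUI:
if __name__ == "__main__":
    base_mask = np.array([True, True, True, False])
    slicer_mask = np.array([True, False, True, True])
    print(base_mask & slicer_mask)   # [ True False  True False]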
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/MaskEditor.py
| 0.579995 | 0.248175 |
MaskEditor.py
|
pypi
|
import numpy
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
from sas.qtgui.Plotting.Slicers.AnnulusSlicer import RingInteractor
class CircularMask(BaseInteractor):
"""
    Draw a circular mask given a radius
"""
def __init__(self, base, axes, color='grey', zorder=3, side=None):
"""
        :param color: the color of the line that defines the ring
        :param side: if True, the mask keeps points inside the ring
"""
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.is_inside = side
self.qmax = min(numpy.fabs(self.data.xmax),
numpy.fabs(self.data.xmin)) # must be positive
self.connect = self.base.connect
# Cursor position of Rings (Left(-1) or Right(1))
self.xmaxd = self.data.xmax
self.xmind = self.data.xmin
if (self.xmaxd + self.xmind) > 0:
self.sign = 1
else:
self.sign = -1
        # Outer circle
self.outer_circle = RingInteractor(self, self.axes, 'blue',
zorder=zorder + 1, r=self.qmax / 1.8,
sign=self.sign)
self.outer_circle.qmax = self.qmax * 1.2
self.update()
self._post_data()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.outer_circle.clear()
self.base.connect.clearall()
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
self.outer_circle.update()
        out = self._post_data()
        return out
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.outer_circle.save(ev)
def _post_data(self):
"""
Uses annulus parameters to plot averaged data into 1D data.
:param nbins: the number of points to plot
"""
# Data to average
data = self.data
# If we have no data, just return
if data is None:
return
        from sas.sascalc.dataloader.manipulations import Ringcut
        rmin = 0
        rmax = numpy.fabs(self.outer_circle.get_radius())
        # Cut a ring [rmin, rmax] out of the 2D data
        mask = Ringcut(r_min=rmin, r_max=rmax)
        if self.is_inside:
            out = (mask(data) == False)
        else:
            out = mask(data)
return out
def moveend(self, ev):
"""
Called when any dragging motion ends.
Post an event (type =SlicerParameterEvent)
to plotter 2D with a copy slicer parameters
Call _post_data method
"""
#self.base.thaw_axes()
# create a 1D data plot
self._post_data()
def restore(self):
"""
Restore the roughness for this layer.
"""
self.outer_circle.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def getParams(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["outer_radius"] = numpy.fabs(self.outer_circle._inner_mouse_x)
return params
def setParams(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
outer = numpy.fabs(params["outer_radius"])
# Update the picture
self.outer_circle.set_cursor(outer, self.outer_circle._inner_mouse_y)
# Post the data given the nbins entered by the user
self._post_data()
def draw(self):
self.base.update()
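# Sketch (not from the sasview sources): _post_data() reduces to a radial cut,
# keeping points with |q| <= R (or the complement when is_inside is set).  The
# equivalent numpy-only computation, assuming flat qx/qy arrays as in Data2D:
if __name__ == "__main__":
    qx = numpy.array([0.0, 0.1, 0.2])
    qy = numpy.array([0.0, 0.0, 0.2])
    r = numpy.sqrt(qx ** 2 + qy ** 2)
    print(r <= 0.15)   # Ringcut(r_min=0, r_max=0.15) -> [ True  True False]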
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Masks/CircularMask.py
| 0.763131 | 0.499756 |
CircularMask.py
|
pypi
|
interface_color = 'black'
disable_color = 'gray'
active_color = 'red'
rho_color = 'black'
mu_color = 'green'
P_color = 'blue'
theta_color = 'orange'
profile_colors = [rho_color, mu_color, P_color, theta_color]
class BaseInteractor(object):
"""
Share some functions between the interface interactor and various layer
interactors.
Individual interactors need the following functions:
save(ev) - save the current state for later restore
restore() - restore the old state
move(x,y,ev) - move the interactor to position x,y
moveend(ev) - end the drag event
update() - draw the interactors
The following are provided by the base class:
connect_markers(markers) - register callbacks for all markers
clear_markers() - remove all items in self.markers
onHilite(ev) - enter/leave event processing
onLeave(ev) - enter/leave event processing
onClick(ev) - mouse click: calls save()
onRelease(ev) - mouse click ends: calls moveend()
onDrag(ev) - mouse move: calls move() or restore()
onKey(ev) - keyboard move: calls move() or restore()
Interactor attributes:
base - model we are operating on
axes - axes holding the interactor
color - color of the interactor in non-active state
markers - list of handles for the interactor
"""
def __init__(self, base, axes, color='black'):
"""
"""
self.base = base
self.axes = axes
self.color = color
self.clickx = None
self.clicky = None
self.markers = []
if isinstance(base.data, list):
self.data = self.base.data[0]
else:
self.data = self.base.data
def clear_markers(self):
"""
Clear old markers and interfaces.
"""
        for h in self.markers:
            h.remove()
if self.markers:
self.base.connect.clear(*self.markers)
self.markers = []
def save(self, ev):
"""
"""
pass
def restore(self, ev):
"""
"""
pass
def move(self, x, y, ev):
"""
"""
pass
def moveend(self, ev):
"""
"""
pass
def connect_markers(self, markers):
"""
Connect markers to callbacks
"""
for h in markers:
connect = self.base.connect
connect('enter', h, self.onHilite)
connect('leave', h, self.onLeave)
connect('click', h, self.onClick)
connect('release', h, self.onRelease)
connect('drag', h, self.onDrag)
connect('key', h, self.onKey)
def onHilite(self, ev):
"""
Hilite the artist reporting the event, indicating that it is
ready to receive a click.
"""
ev.artist.set_color(active_color)
self.base.draw()
return True
def onLeave(self, ev):
"""
Restore the artist to the original colour when the cursor leaves.
"""
ev.artist.set_color(self.color)
self.base.draw()
return True
def onClick(self, ev):
"""
Prepare to move the artist. Calls save() to preserve the state for
later restore().
"""
self.clickx, self.clicky = ev.xdata, ev.ydata
self.save(ev)
return True
def onRelease(self, ev):
"""
"""
self.moveend(ev)
return True
def onDrag(self, ev):
"""
Move the artist. Calls move() to update the state, or restore() if
the mouse leaves the window.
"""
inside, _ = self.axes.contains(ev)
if inside:
self.clickx, self.clicky = ev.xdata, ev.ydata
self.move(ev.xdata, ev.ydata, ev)
else:
self.restore(ev)
self.base.update()
return True
def onKey(self, ev):
"""
Respond to keyboard events. Arrow keys move the widget. Escape
restores it to the position before the last click.
Calls move() to update the state. Calls restore() on escape.
"""
if ev.key == 'escape':
self.restore(ev)
elif ev.key in ['up', 'down', 'right', 'left']:
            # Older matplotlib events exposed .control; fall back to False
            dx, dy = self.dpixel(self.clickx, self.clicky,
                                 nudge=getattr(ev, 'control', False))
if ev.key == 'up':
self.clicky += dy
elif ev.key == 'down':
self.clicky -= dy
elif ev.key == 'right':
self.clickx += dx
else: self.clickx -= dx
self.move(self.clickx, self.clicky, ev)
else:
return False
self.base.update()
return True
def dpixel(self, x, y, nudge=False):
"""
Return the step size in data coordinates for a small
step in screen coordinates. If nudge is False (default)
the step size is one pixel. If nudge is True, the step
size is 0.2 pixels.
"""
        ax = self.axes
        # The historical xy_tup/inverse_xy_tup API is long gone; use the
        # current matplotlib transform interface instead.
        step = 0.2 if nudge else 1.0
        px, py = ax.transData.transform((x, y))
        nx, ny = ax.transData.inverted().transform((px + step, py + step))
        dx, dy = nx - x, ny - y
        return dx, dy
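# Sketch (not from the sasview sources): dpixel() converts a one-pixel step at
# (x, y) into data coordinates.  The same computation with the transform API,
# using the Agg backend so the sketch stays headless:
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x, y = 0.5, 0.5
    px, py = ax.transData.transform((x, y))              # data -> pixels
    nx, ny = ax.transData.inverted().transform((px + 1.0, py + 1.0))
    print(nx - x, ny - y)                                # one pixel, in data units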
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/BaseInteractor.py
| 0.754734 | 0.261925 |
BaseInteractor.py
|
pypi
|
import numpy as np
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
class RadiusInteractor(BaseInteractor):
"""
Select an annulus through a 2D plot
"""
def __init__(self, base, axes, color='black', zorder=5, arc1=None,
arc2=None, theta=np.pi / 8):
"""
"""
        BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.r1 = arc1.get_radius()
self.r2 = arc2.get_radius()
self.theta = theta
self.save_theta = theta
self.move_stop = False
self.theta_left = None
self.theta_right = None
self.arc1 = arc1
self.arc2 = arc2
x1 = self.r1 * np.cos(self.theta)
y1 = self.r1 * np.sin(self.theta)
x2 = self.r2 * np.cos(self.theta)
y2 = self.r2 * np.sin(self.theta)
self.line = self.axes.plot([x1, x2], [y1, y2],
linestyle='-', marker='',
color=self.color,
visible=True)[0]
self.phi = theta
self.npts = 20
self.has_move = False
self.connect_markers([self.line])
self.update()
def set_layer(self, n):
"""
"""
self.layernum = n
self.update()
def clear(self):
"""
"""
self.clear_markers()
try:
self.line.remove()
        except Exception:
# Old version of matplotlib
for item in range(len(self.axes.lines)):
del self.axes.lines[0]
def get_angle(self):
"""
"""
return self.theta
def update(self, r1=None, r2=None, theta=None):
"""
Draw the new roughness on the graph.
"""
if r1 is not None:
self.r1 = r1
if r2 is not None:
self.r2 = r2
if theta is not None:
self.theta = theta
x1 = self.r1 * np.cos(self.theta)
y1 = self.r1 * np.sin(self.theta)
x2 = self.r2 * np.cos(self.theta)
y2 = self.r2 * np.sin(self.theta)
self.line.set(xdata=[x1, x2], ydata=[y1, y2])
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.save_theta = np.arctan2(ev.y, ev.x)
self.base.freeze_axes()
def moveend(self, ev):
"""
"""
self.has_move = False
self.base.moveend(ev)
def restore(self, ev):
"""
Restore the roughness for this layer.
"""
self.theta = self.save_theta
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self.theta = np.arctan2(y, x)
self.has_move = True
self.base.base.update()
def set_cursor(self, r_min, r_max, theta):
"""
"""
self.theta = theta
self.r1 = r_min
self.r2 = r_max
self.update()
def get_params(self):
"""
"""
params = {}
params["radius1"] = self.r1
params["radius2"] = self.r2
params["theta"] = self.theta
return params
def set_params(self, params):
"""
"""
x1 = params["radius1"]
x2 = params["radius2"]
theta = params["theta"]
self.set_cursor(x1, x2, theta)
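# Sketch (not from the sasview sources): the interactor draws a radial segment
# between two arcs; the endpoints follow directly from polar coordinates:
if __name__ == "__main__":
    r1, r2, theta = 0.02, 0.05, np.pi / 8
    print((r1 * np.cos(theta), r1 * np.sin(theta)))   # inner endpoint
    print((r2 * np.cos(theta), r2 * np.sin(theta)))   # outer endpoint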
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/RadiusInteractor.py
| 0.683208 | 0.350477 |
RadiusInteractor.py
|
pypi
|
import numpy
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from .BaseInteractor import BaseInteractor
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Utilities.GuiUtils import formatNumber
from sas.qtgui.Plotting.SlicerModel import SlicerModel
class AnnulusInteractor(BaseInteractor, SlicerModel):
"""
Select an annulus through a 2D plot.
    This interactor is used to average 2D data within the region
    defined by two radii, drawn as two RingInteractors.
"""
def __init__(self, base, axes, item=None, color='black', zorder=3):
BaseInteractor.__init__(self, base, axes, color=color)
SlicerModel.__init__(self)
self.markers = []
self.axes = axes
self.base = base
self._item = item
self.qmax = max(numpy.fabs(self.data.xmax),
numpy.fabs(self.data.xmin)) # must be positive
self.dqmin = min(numpy.fabs(self.data.qx_data))
self.connect = self.base.connect
# Number of points on the plot
self.nbins = 100
# Cursor position of Rings (Left(-1) or Right(1))
self.xmaxd = self.data.xmax
self.xmind = self.data.xmin
if (self.xmaxd + self.xmind) > 0:
self.sign = 1
else:
self.sign = -1
# Inner circle
self.inner_circle = RingInteractor(self, self.axes,
zorder=zorder,
r=self.qmax / 2.0, sign=self.sign)
self.inner_circle.qmax = self.qmax
self.outer_circle = RingInteractor(self, self.axes,
zorder=zorder + 1, r=self.qmax / 1.8,
sign=self.sign)
self.outer_circle.qmax = self.qmax * 1.2
self.update()
self._post_data()
self.setModelFromParams()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.outer_circle.clear()
self.inner_circle.clear()
self.base.connect.clearall()
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
self.inner_circle.update()
self.outer_circle.update()
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.inner_circle.save(ev)
self.outer_circle.save(ev)
def _post_data(self, nbins=None):
"""
Uses annulus parameters to plot averaged data into 1D data.
:param nbins: the number of points to plot
"""
# Data to average
data = self.data
if data is None:
return
from sas.sascalc.dataloader.manipulations import Ring
rmin = min(numpy.fabs(self.inner_circle.get_radius()),
numpy.fabs(self.outer_circle.get_radius()))
rmax = max(numpy.fabs(self.inner_circle.get_radius()),
numpy.fabs(self.outer_circle.get_radius()))
if nbins is not None:
self.nbins = nbins
# Create the data1D Q average of data2D
sect = Ring(r_min=rmin, r_max=rmax, nbins=self.nbins)
sector = sect(self.data)
if hasattr(sector, "dxl"):
dxl = sector.dxl
else:
dxl = None
if hasattr(sector, "dxw"):
dxw = sector.dxw
else:
dxw = None
new_plot = Data1D(x=(sector.x - numpy.pi) * 180 / numpy.pi,
y=sector.y, dy=sector.dy)
new_plot.dxl = dxl
new_plot.dxw = dxw
new_plot.name = "AnnulusPhi" + "(" + self.data.name + ")"
new_plot.title = "AnnulusPhi" + "(" + self.data.name + ")"
new_plot.source = self.data.source
new_plot.interactive = True
new_plot.detector = self.data.detector
# If the data file does not tell us what the axes are, just assume...
new_plot.xaxis("\\rm{\phi}", 'degrees')
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
if hasattr(data, "scale") and data.scale == 'linear' and \
self.data.name.count("Residuals") > 0:
new_plot.ytransform = 'y'
new_plot.yaxis("\\rm{Residuals} ", "/")
new_plot.group_id = "AnnulusPhi" + self.data.name
new_plot.id = "AnnulusPhi" + self.data.name
new_plot.is_data = True
new_plot.xtransform = "x"
new_plot.ytransform = "y"
item = self._item
if self._item.parent() is not None:
item = self._item.parent()
GuiUtils.updateModelItemWithPlot(item, new_plot, new_plot.id)
self.base.manager.communicator.plotUpdateSignal.emit([new_plot])
self.base.manager.communicator.forcePlotDisplaySignal.emit([item, new_plot])
if self.update_model:
self.setModelFromParams()
self.draw()
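# Hedged reading (editorial note, not in the original source): the averaged
# sector.x returned by Ring is an angle in radians; the shift by pi and the
# 180/pi factor above convert it to degrees for the AnnulusPhi plot.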
def validate(self, param_name, param_value):
"""
Test the proposed new value for the named slicer parameter.
"""
# Set the minimum difference between the outer and inner rings to ensure data exists in the annulus
MIN_DIFFERENCE = self.dqmin
isValid = True
if param_name == 'inner_radius':
# First, check the closeness
if numpy.fabs(param_value - self.getParams()['outer_radius']) < MIN_DIFFERENCE:
print("Inner and outer radii too close. Please adjust.")
isValid = False
elif param_value > self.qmax:
print("Inner radius exceeds maximum range. Please adjust.")
isValid = False
elif param_name == 'outer_radius':
# First, check the closeness
if numpy.fabs(param_value - self.getParams()['inner_radius']) < MIN_DIFFERENCE:
print("Inner and outer radii too close. Please adjust.")
isValid = False
elif param_value > self.qmax:
print("Outer radius exceeds maximum range. Please adjust.")
isValid = False
elif param_name == 'nbins':
# Can't be 0
if param_value < 1:
print("Number of bins cannot be less than or equal to 0. Please adjust.")
isValid = False
return isValid
def moveend(self, ev):
"""
Called when any dragging motion ends.
Redraw the plot with new parameters.
"""
self._post_data(self.nbins)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.inner_circle.restore()
self.outer_circle.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def getParams(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["inner_radius"] = numpy.fabs(self.inner_circle._inner_mouse_x)
params["outer_radius"] = numpy.fabs(self.outer_circle._inner_mouse_x)
params["nbins"] = self.nbins
return params
def setParams(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
inner = numpy.fabs(params["inner_radius"])
outer = numpy.fabs(params["outer_radius"])
self.nbins = int(params["nbins"])
# Update the picture
self.inner_circle.set_cursor(inner, self.inner_circle._inner_mouse_y)
self.outer_circle.set_cursor(outer, self.outer_circle._inner_mouse_y)
# Post the data given the nbins entered by the user
self._post_data(self.nbins)
def draw(self):
"""
"""
self.base.draw()
class RingInteractor(BaseInteractor):
"""
Draw a ring given a radius.
"""
def __init__(self, base, axes, color='black', zorder=5, r=1.0, sign=1):
"""
:param color: the color of the line that defines the ring
:param r: the radius of the ring
:param sign: the direction of motion of the marker
"""
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
# Current radius of the ring
self._inner_mouse_x = r
# Value of the center of the ring
self._inner_mouse_y = 0
# previous value of that radius
self._inner_save_x = r
# Save value of the center of the ring
self._inner_save_y = 0
# Reference to the object that instantiated this RingInteractor
self.base = base
# the direction of the motion of the marker
self.sign = sign
# Create the inner circle marker
x_value = [self.sign * numpy.fabs(self._inner_mouse_x)]
self.inner_marker = self.axes.plot(x_value, [0], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder,
visible=True)[0]
# Draw a circle
[self.inner_circle] = self.axes.plot([], [], linestyle='-', marker='', color=self.color)
# The number of points that make the ring line
self.npts = 40
self.connect_markers([self.inner_marker])
self.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.inner_marker.remove()
self.inner_circle.remove()
def get_radius(self):
"""
:return self._inner_mouse_x: the current radius of the ring
"""
return self._inner_mouse_x
def update(self):
"""
Draw the new roughness on the graph.
"""
# Plot inner circle
x = []
y = []
for i in range(self.npts):
phi = 2.0 * numpy.pi / (self.npts - 1) * i
xval = 1.0 * self._inner_mouse_x * numpy.cos(phi)
yval = 1.0 * self._inner_mouse_x * numpy.sin(phi)
x.append(xval)
y.append(yval)
self.inner_marker.set(xdata=[self.sign * numpy.fabs(self._inner_mouse_x)],
ydata=[0])
self.inner_circle.set_data(x, y)
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self._inner_save_x = self._inner_mouse_x
self._inner_save_y = self._inner_mouse_y
def moveend(self, ev):
"""
Called after a dragging motion
"""
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self._inner_mouse_x = self._inner_save_x
self._inner_mouse_y = self._inner_save_y
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self._inner_mouse_x = x
self._inner_mouse_y = y
self.base.base.update()
def set_cursor(self, x, y):
"""
draw the ring given x, y value
"""
self.move(x, y, None)
self.update()
def getParams(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["radius"] = numpy.fabs(self._inner_mouse_x)
return params
def setParams(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
x = params["radius"]
self.set_cursor(x, self._inner_mouse_y)
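# Minimal standalone sketch (editorial addition, not part of this module):
# the ring outline built in RingInteractor.update() is just npts points
# evenly spaced in angle at the current radius.
if __name__ == "__main__":
    demo_radius, demo_npts = 0.05, 40
    demo_phi = 2.0 * numpy.pi / (demo_npts - 1) * numpy.arange(demo_npts)
    demo_x = demo_radius * numpy.cos(demo_phi)
    demo_y = demo_radius * numpy.sin(demo_phi)
    # Every generated point lies exactly on the ring
    assert numpy.allclose(numpy.hypot(demo_x, demo_y), demo_radius)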
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/AnnulusSlicer.py
| 0.72594 | 0.249973 |
AnnulusSlicer.py
|
pypi
|
import numpy as np
from sas.qtgui.Plotting.Slicers.Arc import ArcInteractor
from sas.qtgui.Plotting.Slicers.RadiusInteractor import RadiusInteractor
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
class SectorInteractor(BaseInteractor):
"""
Select a sector region through a 2D plot
"""
def __init__(self, base, axes, color='black', zorder=3):
"""
"""
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.qmax = self.data.xmax
self.connect = self.base.connect
# Number of points on the plot
self.nbins = 100
theta1 = 2 * np.pi / 3
theta2 = -2 * np.pi / 3
# Inner circle
self.inner_circle = ArcInteractor(self, self.base.subplot,
zorder=zorder,
r=self.qmax / 2.0,
theta1=theta1,
theta2=theta2)
self.inner_circle.qmax = self.qmax
self.outer_circle = ArcInteractor(self, self.base.subplot,
zorder=zorder + 1,
r=self.qmax / 1.8,
theta1=theta1,
theta2=theta2)
self.outer_circle.qmax = self.qmax * 1.2
# self.outer_circle.set_cursor(self.base.qmax/1.8, 0)
self.right_edge = RadiusInteractor(self, self.base.subplot,
zorder=zorder + 1,
arc1=self.inner_circle,
arc2=self.outer_circle,
theta=theta1)
self.left_edge = RadiusInteractor(self, self.base.subplot,
zorder=zorder + 1,
arc1=self.inner_circle,
arc2=self.outer_circle,
theta=theta2)
self.update()
self._post_data()
def set_layer(self, n):
"""
"""
self.layernum = n
self.update()
def clear(self):
"""
"""
self.clear_markers()
self.outer_circle.clear()
self.inner_circle.clear()
self.right_edge.clear()
self.left_edge.clear()
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
if self.inner_circle.has_move:
# print "inner circle has moved"
self.inner_circle.update()
r1 = self.inner_circle.get_radius()
r2 = self.outer_circle.get_radius()
self.right_edge.update(r1, r2)
self.left_edge.update(r1, r2)
if self.outer_circle.has_move:
# print "outer circle has moved"
self.outer_circle.update()
r1 = self.inner_circle.get_radius()
r2 = self.outer_circle.get_radius()
self.left_edge.update(r1, r2)
self.right_edge.update(r1, r2)
if self.right_edge.has_move:
# print "right edge has moved"
self.right_edge.update()
self.inner_circle.update(theta1=self.right_edge.get_angle(),
theta2=None)
self.outer_circle.update(theta1=self.right_edge.get_angle(),
theta2=None)
if self.left_edge.has_move:
# print "left Edge has moved"
self.left_edge.update()
self.inner_circle.update(theta1=None,
theta2=self.left_edge.get_angle())
self.outer_circle.update(theta1=None,
theta2=self.left_edge.get_angle())
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.base.freeze_axes()
self.inner_circle.save(ev)
self.outer_circle.save(ev)
self.right_edge.save(ev)
self.left_edge.save(ev)
def _post_data(self):
pass
def post_data(self, new_sector):
""" post data averaging in Q"""
if self.inner_circle.get_radius() < self.outer_circle.get_radius():
rmin = self.inner_circle.get_radius()
rmax = self.outer_circle.get_radius()
else:
rmin = self.outer_circle.get_radius()
rmax = self.inner_circle.get_radius()
if self.right_edge.get_angle() < self.left_edge.get_angle():
phimin = self.right_edge.get_angle()
phimax = self.left_edge.get_angle()
else:
phimin = self.left_edge.get_angle()
phimax = self.right_edge.get_angle()
sect = new_sector(r_min=rmin, r_max=rmax,
phi_min=phimin, phi_max=phimax)
sector = sect(self.data)
from sas.qtgui.Plotting.PlotterData import Data1D
if hasattr(sector, "dxl"):
dxl = sector.dxl
else:
dxl = None
if hasattr(sector, "dxw"):
dxw = sector.dxw
else:
dxw = None
new_plot = Data1D(x=sector.x, y=sector.y, dy=sector.dy,
dxl=dxl, dxw=dxw)
new_plot.name = str(new_sector.__name__) + \
"(" + self.data.name + ")"
new_plot.source = self.data.source
new_plot.interactive = True
# print "loader output.detector",output.source
new_plot.detector = self.data.detector
# If the data file does not tell us what the axes are, just assume...
new_plot.xaxis("\\rm{Q}", 'rad')
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
new_plot.group_id = str(new_sector.__name__) + self.data.name
def validate(self, param_name, param_value):
"""
Test the proposed new value for the named slicer parameter.
"""
# Here, always return true
return True
def moveend(self, ev):
#TODO: why is this empty?
pass
def restore(self):
"""
Restore the roughness for this layer.
"""
self.inner_circle.restore()
self.outer_circle.restore()
self.right_edge.restore()
self.left_edge.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
"""
"""
pass
def get_params(self):
"""
"""
params = {}
params["r_min"] = self.inner_circle.get_radius()
params["r_max"] = self.outer_circle.get_radius()
params["phi_min"] = self.right_edge.get_angle()
params["phi_max"] = self.left_edge.get_angle()
params["nbins"] = self.nbins
return params
def set_params(self, params):
"""
"""
# print "setparams on main slicer ",params
inner = params["r_min"]
outer = params["r_max"]
phi_min = params["phi_min"]
phi_max = params["phi_max"]
self.nbins = int(params["nbins"])
self.inner_circle.set_cursor(inner, phi_min, phi_max, self.nbins)
self.outer_circle.set_cursor(outer, phi_min, phi_max, self.nbins)
self.right_edge.set_cursor(inner, outer, phi_min)
self.left_edge.set_cursor(inner, outer, phi_max)
self._post_data()
def freeze_axes(self):
"""
"""
self.base.freeze_axes()
def thaw_axes(self):
"""
"""
self.base.thaw_axes()
def draw(self):
"""
"""
self.base.draw()
class SectorInteractorQ(SectorInteractor):
"""
"""
def __init__(self, base, axes, color='black', zorder=3):
"""
"""
SectorInteractor.__init__(self, base, axes, color=color)
self.base = base
self._post_data()
def _post_data(self):
"""
"""
from sas.sascalc.dataloader.manipulations import SectorQ
self.post_data(SectorQ)
class SectorInteractorPhi(SectorInteractor):
"""
"""
def __init__(self, base, axes, color='black', zorder=3):
"""
"""
SectorInteractor.__init__(self, base, axes, color=color)
self.base = base
self._post_data()
def _post_data(self):
"""
"""
from sas.sascalc.dataloader.manipulations import SectorPhi
self.post_data(SectorPhi)
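# Standalone sketch (editorial addition): post_data() orders the two radii
# and the two edge angles before averaging, regardless of which ring or
# edge the user dragged further.
if __name__ == "__main__":
    r_a, r_b = 0.08, 0.05
    rmin, rmax = min(r_a, r_b), max(r_a, r_b)
    phi_a, phi_b = 2.1, -2.1
    phimin, phimax = min(phi_a, phi_b), max(phi_a, phi_b)
    print(rmin, rmax, phimin, phimax)  # 0.05 0.08 -2.1 2.1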
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/AzimutSlicer.py
| 0.530723 | 0.202286 |
AzimutSlicer.py
|
pypi
|
import numpy
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
from sas.qtgui.Plotting.PlotterData import Data1D
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Plotting.SlicerModel import SlicerModel
class BoxInteractor(BaseInteractor, SlicerModel):
"""
BoxInteractor defines a rectangle that returns a Data1D average of the
Data2D over the rectangular area spanned by -x, x, -y, y
"""
def __init__(self, base, axes, item=None, color='black', zorder=3):
BaseInteractor.__init__(self, base, axes, color=color)
SlicerModel.__init__(self)
# Class initialization
self.markers = []
self.axes = axes
self._item = item
# connecting artist
self.connect = self.base.connect
# which direction is the preferred interaction direction
self.direction = None
# determine x y values
self.x = 0.5 * min(numpy.fabs(self.data.xmax),
numpy.fabs(self.data.xmin))
self.y = 0.5 * min(numpy.fabs(self.data.xmax),
numpy.fabs(self.data.xmin))
# when reach qmax reset the graph
self.qmax = max(self.data.xmax, self.data.xmin,
self.data.ymax, self.data.ymin)
# Number of points on the plot
self.nbins = 100
# If True, I(|Q|) will be returned; otherwise
# negative q-values are allowed
self.fold = True
# reference of the current Slab averaging
self.averager = None
# Create vertical and horizontal lines for the rectangle
self.vertical_lines = VerticalLines(self,
self.axes,
color='blue',
zorder=zorder,
y=self.y,
x=self.x)
self.vertical_lines.qmax = self.qmax
self.horizontal_lines = HorizontalLines(self,
self.axes,
color='green',
zorder=zorder,
x=self.x,
y=self.y)
self.horizontal_lines.qmax = self.qmax
# Draw the rectangle and plot the 1D data resulting
# from averaging the 2D data
self.update()
self._post_data()
self.setModelFromParams()
def update_and_post(self):
"""
Update the slicer and plot the resulting data
"""
self.update()
self._post_data()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.averager = None
self.clear_markers()
self.horizontal_lines.clear()
self.vertical_lines.clear()
self.base.connect.clearall()
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update the slicer if a horizontal line is dragged
if self.horizontal_lines.has_move:
self.horizontal_lines.update()
self.vertical_lines.update(y=self.horizontal_lines.y)
# Update the slicer if a vertical line is dragged
if self.vertical_lines.has_move:
self.vertical_lines.update()
self.horizontal_lines.update(x=self.vertical_lines.x)
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.vertical_lines.save(ev)
self.horizontal_lines.save(ev)
def _post_data(self):
pass
def post_data(self, new_slab=None, nbins=None, direction=None):
"""
post data averaging in Qx or Qy given new_slab type
:param new_slab: slicer that determine with direction to average
:param nbins: the number of points plotted when averaging
:param direction: the direction of averaging
"""
if self.direction is None:
self.direction = direction
x_min = -1 * numpy.fabs(self.vertical_lines.x)
x_max = numpy.fabs(self.vertical_lines.x)
y_min = -1 * numpy.fabs(self.horizontal_lines.y)
y_max = numpy.fabs(self.horizontal_lines.y)
if nbins is not None:
self.nbins = nbins
if self.averager is None:
if new_slab is None:
msg = "post data:cannot average , averager is empty"
raise ValueError(msg)
self.averager = new_slab
if self.direction == "X":
if self.fold:
x_low = 0
else:
x_low = numpy.fabs(x_min)
bin_width = (x_max + x_low) / self.nbins
elif self.direction == "Y":
if self.fold:
y_low = 0
else:
y_low = numpy.fabs(y_min)
bin_width = (y_max + y_low) / self.nbins
else:
msg = "post data:no Box Average direction was supplied"
raise ValueError(msg)
# Average data2D given Qx or Qy
box = self.averager(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max,
bin_width=bin_width)
box.fold = self.fold
boxavg = box(self.data)
# Create the Data1D to plot
if hasattr(boxavg, "dxl"):
dxl = boxavg.dxl
else:
dxl = None
if hasattr(boxavg, "dxw"):
dxw = boxavg.dxw
else:
dxw = None
new_plot = Data1D(x=boxavg.x, y=boxavg.y, dy=boxavg.dy)
new_plot.dxl = dxl
new_plot.dxw = dxw
new_plot.name = str(self.averager.__name__) + \
"(" + self.data.name + ")"
new_plot.title = str(self.averager.__name__) + \
"(" + self.data.name + ")"
new_plot.source = self.data.source
new_plot.interactive = True
new_plot.detector = self.data.detector
# If the data file does not tell us what the axes are, just assume...
new_plot.xaxis("\\rm{Q}", "A^{-1}")
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
data = self.data
if hasattr(data, "scale") and data.scale == 'linear' and \
self.data.name.count("Residuals") > 0:
new_plot.ytransform = 'y'
new_plot.yaxis("\\rm{Residuals} ", "/")
new_plot.id = (self.averager.__name__) + self.data.name
new_plot.group_id = new_plot.id
new_plot.is_data = True
item = self._item
if self._item.parent() is not None:
item = self._item.parent()
GuiUtils.updateModelItemWithPlot(item, new_plot, new_plot.id)
self.base.manager.communicator.forcePlotDisplaySignal.emit([item, new_plot])
if self.update_model:
self.setModelFromParams()
self.draw()
def moveend(self, ev):
"""
Called after a dragging event.
Post the slicer new parameters and creates a new Data1D
corresponding to the new average
"""
self._post_data()
def restore(self):
"""
Restore the roughness for this layer.
"""
self.horizontal_lines.restore()
self.vertical_lines.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def getParams(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["x_max"] = numpy.fabs(self.vertical_lines.x)
params["y_max"] = numpy.fabs(self.horizontal_lines.y)
params["nbins"] = self.nbins
params["fold"] = self.fold
return params
def setParams(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
self.x = float(numpy.fabs(params["x_max"]))
self.y = float(numpy.fabs(params["y_max"]))
self.nbins = params["nbins"]
self.fold = params["fold"]
self.horizontal_lines.update(x=self.x, y=self.y)
self.vertical_lines.update(x=self.x, y=self.y)
self.post_data(nbins=None)
def draw(self):
"""
"""
self.base.draw()
class HorizontalLines(BaseInteractor):
"""
Draw two horizontal lines centered on (0, 0) that can move
along the y-direction in opposite directions
"""
def __init__(self, base, axes, color='black', zorder=5, x=0.5, y=0.5):
"""
"""
BaseInteractor.__init__(self, base, axes, color=color)
# Class initialization
self.markers = []
self.axes = axes
# Saving the end points of two lines
self.x = x
self.save_x = x
self.y = y
self.save_y = y
# Creating a marker
# Inner circle marker
self.inner_marker = self.axes.plot([0], [self.y], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder,
visible=True)[0]
# Define 2 horizontal lines
self.top_line = self.axes.plot([self.x, -self.x], [self.y, self.y],
linestyle='-', marker='',
color=self.color, visible=True)[0]
self.bottom_line = self.axes.plot([self.x, -self.x], [-self.y, -self.y],
linestyle='-', marker='',
color=self.color, visible=True)[0]
# Flag to check the motion of the lines
self.has_move = False
# Connecting markers to mouse events and draw
self.connect_markers([self.top_line, self.inner_marker])
self.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear this slicer and its markers
"""
self.clear_markers()
self.inner_marker.remove()
self.top_line.remove()
self.bottom_line.remove()
def update(self, x=None, y=None):
"""
Draw the new roughness on the graph.
:param x: x-coordinates to reset current class x
:param y: y-coordinates to reset current class y
"""
# Reset x, y coordinates if sent as parameters
if x is not None:
self.x = numpy.sign(self.x) * numpy.fabs(x)
if y is not None:
self.y = numpy.sign(self.y) * numpy.fabs(y)
# Draw lines and markers
self.inner_marker.set(xdata=[0], ydata=[self.y])
self.top_line.set(xdata=[self.x, -self.x], ydata=[self.y, self.y])
self.bottom_line.set(xdata=[self.x, -self.x], ydata=[-self.y, -self.y])
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.save_x = self.x
self.save_y = self.y
def moveend(self, ev):
"""
Called after dragging this edge; sets self.has_move to False
to mark the end of the dragging motion
"""
self.has_move = False
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.x = self.save_x
self.y = self.save_y
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self.y = y
self.has_move = True
self.base.base.update()
class VerticalLines(BaseInteractor):
"""
Select an annulus through a 2D plot
"""
def __init__(self, base, axes, color='black', zorder=5, x=0.5, y=0.5):
"""
"""
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.x = numpy.fabs(x)
self.save_x = self.x
self.y = numpy.fabs(y)
self.save_y = y
# Inner circle marker
self.inner_marker = self.axes.plot([self.x], [0], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder, visible=True)[0]
self.right_line = self.axes.plot([self.x, self.x],
[self.y, -self.y],
linestyle='-', marker='',
color=self.color, visible=True)[0]
self.left_line = self.axes.plot([-self.x, -self.x],
[self.y, -self.y],
linestyle='-', marker='',
color=self.color, visible=True)[0]
self.has_move = False
self.connect_markers([self.right_line, self.inner_marker])
self.update()
def validate(self, param_name, param_value):
"""
Validate input from user
"""
return True
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear this slicer and its markers
"""
self.clear_markers()
self.inner_marker.remove()
self.left_line.remove()
self.right_line.remove()
def update(self, x=None, y=None):
"""
Draw the new roughness on the graph.
:param x: x-coordinates to reset current class x
:param y: y-coordinates to reset current class y
"""
# Reset x, y -coordinates if given as parameters
if x is not None:
self.x = numpy.sign(self.x) * numpy.fabs(x)
if y is not None:
self.y = numpy.sign(self.y) * numpy.fabs(y)
# Draw lines and markers
self.inner_marker.set(xdata=[self.x], ydata=[0])
self.left_line.set(xdata=[-self.x, -self.x], ydata=[self.y, -self.y])
self.right_line.set(xdata=[self.x, self.x], ydata=[self.y, -self.y])
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.save_x = self.x
self.save_y = self.y
def moveend(self, ev):
"""
Called after dragging this edge; sets self.has_move to False
to mark the end of the dragging motion
"""
self.has_move = False
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.x = self.save_x
self.y = self.save_y
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self.has_move = True
self.x = x
self.base.base.update()
class BoxInteractorX(BoxInteractor):
"""
Average in Qx direction
"""
def __init__(self, base, axes, item=None, color='black', zorder=3):
BoxInteractor.__init__(self, base, axes, item=item, color=color)
self.base = base
self._post_data()
def _post_data(self):
"""
Post data created by averaging in the Qx direction
"""
from sas.sascalc.dataloader.manipulations import SlabX
self.post_data(SlabX, direction="X")
def validate(self, param_name, param_value):
"""
Validate input from user.
Values get checked at apply time.
"""
isValid = True
if param_name == 'nbins':
# Can't be 0
if param_value < 1:
print("Number of bins cannot be less than or equal to 0. Please adjust.")
isValid = False
return isValid
class BoxInteractorY(BoxInteractor):
"""
Average in Qy direction
"""
def __init__(self, base, axes, item=None, color='black', zorder=3):
BoxInteractor.__init__(self, base, axes, item=item, color=color)
self.base = base
self._post_data()
def _post_data(self):
"""
Post data created by averaging in the Qy direction
"""
from sas.sascalc.dataloader.manipulations import SlabY
self.post_data(SlabY, direction="Y")
def validate(self, param_name, param_value):
"""
Validate input from user
Values get checked at apply time.
"""
isValid = True
if param_name == 'nbins':
# Can't be 0
if param_value < 1:
print("Number of bins cannot be less than or equal to 0. Please adjust.")
isValid = False
return isValid
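# Standalone sketch (editorial addition): the bin width used by
# BoxInteractor.post_data() depends on the fold flag -- folding to I(|Q|)
# halves the averaged range and therefore the bin width.
if __name__ == "__main__":
    x_min, x_max, nbins = -0.04, 0.04, 100
    for fold in (True, False):
        x_low = 0 if fold else numpy.fabs(x_min)
        bin_width = (x_max + x_low) / nbins
        print(fold, bin_width)  # True -> 0.0004, False -> 0.0008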
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/BoxSlicer.py
| 0.716318 | 0.309819 |
BoxSlicer.py
|
pypi
|
import numpy
import logging
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
from sas.qtgui.Plotting.PlotterData import Data1D
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Plotting.SlicerModel import SlicerModel
MIN_PHI = 0.05
class SectorInteractor(BaseInteractor, SlicerModel):
"""
Draw a sector slicer. Allows Q averaging on 2D data.
"""
def __init__(self, base, axes, item=None, color='black', zorder=3):
BaseInteractor.__init__(self, base, axes, color=color)
SlicerModel.__init__(self)
# Class initialization
self.markers = []
self.axes = axes
self._item = item
# Connect the plot to event
self.connect = self.base.connect
# Compute qmax limit to reset the graph
x = numpy.power(max(self.data.xmax,
numpy.fabs(self.data.xmin)), 2)
y = numpy.power(max(self.data.ymax,
numpy.fabs(self.data.ymin)), 2)
self.qmax = numpy.sqrt(x + y)
# Number of points on the plot
self.nbins = 100
# Angle of the middle line
self.theta2 = numpy.pi / 3
# Absolute value of the Angle between the middle line and any side line
self.phi = numpy.pi / 12
# Middle line
self.main_line = LineInteractor(self, self.axes, color='blue',
zorder=zorder, r=self.qmax,
theta=self.theta2)
self.main_line.qmax = self.qmax
# Right Side line
self.right_line = SideInteractor(self, self.axes, color='black',
zorder=zorder, r=self.qmax,
phi=-1 * self.phi, theta2=self.theta2)
self.right_line.qmax = self.qmax
# Left Side line
self.left_line = SideInteractor(self, self.axes, color='black',
zorder=zorder, r=self.qmax,
phi=self.phi, theta2=self.theta2)
self.left_line.qmax = self.qmax
# draw the sector
self.update()
self._post_data()
self.setModelFromParams()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.main_line.clear()
self.left_line.clear()
self.right_line.clear()
self.base.connect.clearall()
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
# Check if the middle line was dragged and
# update the picture accordingly
if self.main_line.has_move:
self.main_line.update()
self.right_line.update(delta=-self.left_line.phi / 2,
mline=self.main_line.theta)
self.left_line.update(delta=self.left_line.phi / 2,
mline=self.main_line.theta)
# Check if the left side has moved and update the slicer accordingly
if self.left_line.has_move:
self.main_line.update()
self.left_line.update(phi=None, delta=None, mline=self.main_line,
side=True, left=True)
self.right_line.update(phi=self.left_line.phi, delta=None,
mline=self.main_line, side=True,
left=False, right=True)
# Check if the right side line has moved and update the slicer accordingly
if self.right_line.has_move:
self.main_line.update()
self.right_line.update(phi=None, delta=None, mline=self.main_line,
side=True, left=False, right=True)
self.left_line.update(phi=self.right_line.phi, delta=None,
mline=self.main_line, side=True, left=False)
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.main_line.save(ev)
self.right_line.save(ev)
self.left_line.save(ev)
def _post_data(self, nbins=None):
"""
compute sector averaging of data2D into data1D
:param nbins: the number of point to plot for the average 1D data
"""
# Get the data2D to average
data = self.data
# If we have no data, just return
if data is None:
return
# Averaging
from sas.sascalc.dataloader.manipulations import SectorQ
radius = self.qmax
phimin = -self.left_line.phi + self.main_line.theta
phimax = self.left_line.phi + self.main_line.theta
if nbins is None:
nbins = self.nbins
sect = SectorQ(r_min=0.0, r_max=radius,
phi_min=phimin + numpy.pi,
phi_max=phimax + numpy.pi, nbins=nbins)
sector = sect(self.data)
# Create 1D data resulting from average
if hasattr(sector, "dxl"):
dxl = sector.dxl
else:
dxl = None
if hasattr(sector, "dxw"):
dxw = sector.dxw
else:
dxw = None
new_plot = Data1D(x=sector.x, y=sector.y, dy=sector.dy, dx=sector.dx)
new_plot.dxl = dxl
new_plot.dxw = dxw
new_plot.name = "SectorQ" + "(" + self.data.name + ")"
new_plot.title = "SectorQ" + "(" + self.data.name + ")"
new_plot.source = self.data.source
new_plot.interactive = True
new_plot.detector = self.data.detector
# If the data file does not tell us what the axes are, just assume them.
new_plot.xaxis("\\rm{Q}", "A^{-1}")
new_plot.yaxis("\\rm{Intensity}", "cm^{-1}")
if hasattr(data, "scale") and data.scale == 'linear' and \
self.data.name.count("Residuals") > 0:
new_plot.ytransform = 'y'
new_plot.yaxis("\\rm{Residuals} ", "/")
new_plot.group_id = "2daverage" + self.data.name
new_plot.id = "SectorQ" + self.data.name
new_plot.is_data = True
item = self._item
if self._item.parent() is not None:
item = self._item.parent()
GuiUtils.updateModelItemWithPlot(item, new_plot, new_plot.id)
self.base.manager.communicator.plotUpdateSignal.emit([new_plot])
self.base.manager.communicator.forcePlotDisplaySignal.emit([item, new_plot])
if self.update_model:
self.setModelFromParams()
self.draw()
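# Hedged reading (editorial note): the +pi shift applied to phi_min and
# phi_max above passes the angles to SectorQ in a 0..2*pi convention;
# this is inferred from the surrounding code, not verified against the
# manipulations module.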
def validate(self, param_name, param_value):
"""
Test the proposed new value for the named slicer parameter.
"""
MIN_DIFFERENCE = 0.01
isValid = True
if param_name == 'Delta_Phi [deg]':
# First, check the closeness
if numpy.fabs(param_value) < MIN_DIFFERENCE:
print("Sector angles too close. Please adjust.")
isValid = False
elif param_name == 'nbins':
# Can't be 0
if param_value < 1:
print("Number of bins cannot be less than or equal to 0. Please adjust.")
isValid = False
return isValid
def moveend(self, ev):
"""
Called when a dragging motion ends; gets the slicer event.
"""
# Post parameters
self._post_data(self.nbins)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.main_line.restore()
self.left_line.restore()
self.right_line.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def getParams(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
# Always make sure that the left and right lines are at angle phi
# from the middle line
if numpy.fabs(self.left_line.phi) != numpy.fabs(self.right_line.phi):
msg = "Phi left and phi right are different"
msg += " %f, %f" % (self.left_line.phi, self.right_line.phi)
raise ValueError(msg)
params["Phi [deg]"] = self.main_line.theta * 180 / numpy.pi
params["Delta_Phi [deg]"] = numpy.fabs(self.left_line.phi * 180 / numpy.pi)
params["nbins"] = self.nbins
return params
def setParams(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
main = params["Phi [deg]"] * numpy.pi / 180
phi = numpy.fabs(params["Delta_Phi [deg]"] * numpy.pi / 180)
# phi should not be too close to zero.
if numpy.fabs(phi) < MIN_PHI:
phi = MIN_PHI
params["Delta_Phi [deg]"] = MIN_PHI
self.nbins = int(params["nbins"])
self.main_line.theta = main
# Reset the slicer parameters
self.main_line.update()
self.right_line.update(phi=phi, delta=None, mline=self.main_line,
side=True, right=True)
self.left_line.update(phi=phi, delta=None,
mline=self.main_line, side=True)
# Post the new corresponding data
self._post_data(nbins=self.nbins)
def draw(self):
"""
Redraw canvas
"""
self.base.draw()
class SideInteractor(BaseInteractor):
"""
Draw an oblique line
:param phi: the angle between the middle line and one side line
:param theta2: the angle between the middle line and the x-axis
"""
def __init__(self, base, axes, color='black', zorder=5, r=1.0,
phi=numpy.pi / 4, theta2=numpy.pi / 3):
BaseInteractor.__init__(self, base, axes, color=color)
# Initialize the class
self.markers = []
self.axes = axes
self.color = color
# compute the value of the angle between the current line and
# the x-axis
self.save_theta = theta2 + phi
self.theta = theta2 + phi
# the value of the middle line angle with respect to the x-axis
self.theta2 = theta2
# Radius used to find the polar coordinates of this line's endpoints
self.radius = r
# phi is the angle between the current line and the middle line
self.phi = phi
# End points polar coordinates
x1 = self.radius * numpy.cos(self.theta)
y1 = self.radius * numpy.sin(self.theta)
x2 = -1 * self.radius * numpy.cos(self.theta)
y2 = -1 * self.radius * numpy.sin(self.theta)
# Defining a new marker
self.inner_marker = self.axes.plot([x1 / 2.5], [y1 / 2.5], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder, visible=True)[0]
# Defining the current line
self.line = self.axes.plot([x1, x2], [y1, y2],
linestyle='-', marker='',
color=self.color, visible=True)[0]
# Flag to differentiate the left line from the right line motion
self.left_moving = False
# Flag to define a motion
self.has_move = False
# connecting markers and draw the picture
self.connect_markers([self.inner_marker, self.line])
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
try:
self.line.remove()
self.inner_marker.remove()
except:
# Old version of matplotlib
for item in range(len(self.axes.lines)):
del self.axes.lines[0]
def update(self, phi=None, delta=None, mline=None,
side=False, left=False, right=False):
"""
Draw oblique line
:param phi: the phase between the middle line and the current line
:param delta: phi/2 applied only when the mline was moved
"""
self.left_moving = left
theta3 = 0
if phi is not None:
self.phi = phi
if delta is None:
delta = 0
if right:
self.phi = -1 * numpy.fabs(self.phi)
#delta=-delta
else:
self.phi = numpy.fabs(self.phi)
if side:
self.theta = mline.theta + self.phi
if mline is not None:
if delta != 0:
self.theta2 = mline + delta
else:
self.theta2 = mline.theta
if delta == 0:
theta3 = self.theta + delta
else:
theta3 = self.theta2 + delta
x1 = self.radius * numpy.cos(theta3)
y1 = self.radius * numpy.sin(theta3)
x2 = -1 * self.radius * numpy.cos(theta3)
y2 = -1 * self.radius * numpy.sin(theta3)
self.inner_marker.set(xdata=[x1 / 2.5], ydata=[y1 / 2.5])
self.line.set(xdata=[x1, x2], ydata=[y1, y2])
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.save_theta = self.theta
def moveend(self, ev):
self.has_move = False
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.theta = self.save_theta
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self.theta = numpy.arctan2(y, x)
self.has_move = True
if not self.left_moving:
if self.theta2 - self.theta <= 0 and self.theta2 > 0:
self.restore()
return
elif self.theta2 < 0 and self.theta < 0 and \
self.theta - self.theta2 >= 0:
self.restore()
return
elif self.theta2 < 0 and self.theta > 0 and \
(self.theta2 + 2 * numpy.pi - self.theta) >= numpy.pi / 2:
self.restore()
return
elif self.theta2 < 0 and self.theta < 0 and \
(self.theta2 - self.theta) >= numpy.pi / 2:
self.restore()
return
elif self.theta2 > 0 and (self.theta2 - self.theta >= numpy.pi / 2 or \
(self.theta2 - self.theta >= numpy.pi / 2)):
self.restore()
return
else:
if self.theta < 0 and (self.theta + numpy.pi * 2 - self.theta2) <= 0:
self.restore()
return
elif self.theta2 < 0 and (self.theta - self.theta2) <= 0:
self.restore()
return
elif self.theta > 0 and self.theta - self.theta2 <= 0:
self.restore()
return
elif self.theta - self.theta2 >= numpy.pi / 2 or \
((self.theta + numpy.pi * 2 - self.theta2) >= numpy.pi / 2 and \
self.theta < 0 and self.theta2 > 0):
self.restore()
return
self.phi = numpy.fabs(self.theta2 - self.theta)
if self.phi > numpy.pi:
self.phi = 2 * numpy.pi - numpy.fabs(self.theta2 - self.theta)
self.base.base.update()
def set_cursor(self, x, y):
self.move(x, y, None)
self.update()
def getParams(self):
params = {}
params["radius"] = self.radius
params["theta"] = self.theta
return params
def setParams(self, params):
x = params["radius"]
self.set_cursor(x, None)
class LineInteractor(BaseInteractor):
"""
Draw a line through the origin at a given angle theta.
"""
def __init__(self, base, axes, color='black',
zorder=5, r=1.0, theta=numpy.pi / 4):
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.color = color
self.axes = axes
self.save_theta = theta
self.theta = theta
self.radius = r
self.scale = 10.0
# Inner circle
x1 = self.radius * numpy.cos(self.theta)
y1 = self.radius * numpy.sin(self.theta)
x2 = -1 * self.radius * numpy.cos(self.theta)
y2 = -1 * self.radius * numpy.sin(self.theta)
# Inner circle marker
self.inner_marker = self.axes.plot([x1 / 2.5], [y1 / 2.5], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder,
visible=True)[0]
self.line = self.axes.plot([x1, x2], [y1, y2],
linestyle='-', marker='',
color=self.color, visible=True)[0]
self.npts = 20
self.has_move = False
self.connect_markers([self.inner_marker, self.line])
self.update()
def set_layer(self, n):
self.layernum = n
self.update()
def clear(self):
self.clear_markers()
try:
self.inner_marker.remove()
self.line.remove()
except:
# Old version of matplotlib
for item in range(len(self.axes.lines)):
del self.axes.lines[0]
def update(self, theta=None):
"""
Draw the new roughness on the graph.
"""
if theta is not None:
self.theta = theta
x1 = self.radius * numpy.cos(self.theta)
y1 = self.radius * numpy.sin(self.theta)
x2 = -1 * self.radius * numpy.cos(self.theta)
y2 = -1 * self.radius * numpy.sin(self.theta)
self.inner_marker.set(xdata=[x1 / 2.5], ydata=[y1 / 2.5])
self.line.set(xdata=[x1, x2], ydata=[y1, y2])
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.save_theta = self.theta
def moveend(self, ev):
self.has_move = False
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.theta = self.save_theta
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self.theta = numpy.arctan2(y, x)
self.has_move = True
self.base.base.update()
def set_cursor(self, x, y):
self.move(x, y, None)
self.update()
def getParams(self):
params = {}
params["radius"] = self.radius
params["theta"] = self.theta
return params
def setParams(self, params):
x = params["radius"]
self.set_cursor(x, None)
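# Standalone sketch (editorial addition): SideInteractor.move() keeps phi as
# the short angular distance between the side line and the middle line.
if __name__ == "__main__":
    theta2, theta = 3.0, -3.0
    phi = numpy.fabs(theta2 - theta)
    if phi > numpy.pi:
        phi = 2 * numpy.pi - numpy.fabs(theta2 - theta)
    print(phi)  # ~0.283 rad: the short way around the circle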
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/SectorSlicer.py
| 0.754734 | 0.238705 |
SectorSlicer.py
|
pypi
|
import numpy as np
from sas.qtgui.Plotting.Slicers.BaseInteractor import BaseInteractor
class ArcInteractor(BaseInteractor):
"""
Draw an arc on a 2D plot at a given radius, between two angles.
"""
def __init__(self, base, axes, color='black', zorder=5, r=1.0,
theta1=np.pi / 8, theta2=np.pi / 4):
BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self._mouse_x = r
self._mouse_y = 0
self._save_x = r
self._save_y = 0
self.scale = 10.0
self.theta1 = theta1
self.theta2 = theta2
self.radius = r
[self.arc] = self.axes.plot([], [], linestyle='-', marker='', color=self.color)
self.npts = 20
self.has_move = False
self.connect_markers([self.arc])
self.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear this slicer and its markers
"""
self.clear_markers()
try:
for item in self.markers:
item.remove()
self.arc.remove()
except:
# Old version of matplotlib
for item in range(len(self.axes.lines)):
del self.axes.lines[0]
def get_radius(self):
"""
Return arc radius
"""
radius = np.sqrt(np.power(self._mouse_x, 2) + \
np.power(self._mouse_y, 2))
return radius
def update(self, theta1=None, theta2=None, nbins=None, r=None):
"""
Update the plotted arc
:param theta1: starting angle of the arc
:param theta2: ending angle of the arc
:param nbins: number of points along the arc
:param r: radius of the arc
"""
# Plot inner circle
x = []
y = []
if theta1 is not None:
self.theta1 = theta1
if theta2 is not None:
self.theta2 = theta2
while self.theta2 < self.theta1:
self.theta2 += (2 * np.pi)
while self.theta2 >= (self.theta1 + 2 * np.pi):
self.theta2 -= (2 * np.pi)
self.npts = int((self.theta2 - self.theta1) / (np.pi / 120))
if r is None:
self.radius = np.sqrt(np.power(self._mouse_x, 2) + \
np.power(self._mouse_y, 2))
else:
self.radius = r
for i in range(self.npts):
phi = (self.theta2 - self.theta1) / (self.npts - 1) * i + self.theta1
xval = 1.0 * self.radius * np.cos(phi)
yval = 1.0 * self.radius * np.sin(phi)
x.append(xval)
y.append(yval)
self.arc.set_data(x, y)
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self._save_x = self._mouse_x
self._save_y = self._mouse_y
self.base.freeze_axes()
def moveend(self, ev):
"""
After a dragging motion reset the flag self.has_move to False
:param ev: event
"""
self.has_move = False
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self._mouse_x = self._save_x
self._mouse_y = self._save_y
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self._mouse_x = x
self._mouse_y = y
self.has_move = True
self.base.base.update()
def set_cursor(self, radius, phi_min, phi_max, nbins):
"""
"""
self.theta1 = phi_min
self.theta2 = phi_max
self.update(nbins=nbins, r=radius)
def get_params(self):
"""
"""
params = {}
params["radius"] = self.radius
params["theta1"] = self.theta1
params["theta2"] = self.theta2
return params
def set_params(self, params):
"""
"""
x = params["radius"]
phi_max = self.theta2
nbins = self.npts
self.set_cursor(x, self._mouse_y, phi_max, nbins)
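# Standalone sketch (editorial addition): ArcInteractor.update() normalizes
# theta2 into the half-open interval [theta1, theta1 + 2*pi).
if __name__ == "__main__":
    theta1, theta2 = np.pi / 8, -np.pi
    while theta2 < theta1:
        theta2 += 2 * np.pi
    while theta2 >= theta1 + 2 * np.pi:
        theta2 -= 2 * np.pi
    assert theta1 <= theta2 < theta1 + 2 * np.pi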
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Plotting/Slicers/Arc.py
| 0.7586 | 0.387314 |
Arc.py
|
pypi
|
"""
"""
import copy
class DataState(object):
"""
Store information about data
"""
def __init__(self, data=None, parent=None):
"""
"""
self.parent = parent
self.data = data
self.name = ""
self.path = None
self.theory_list = {}
self.message = ""
self.id = None
def __str__(self):
_str = ""
_str += "State with ID : %s \n" % str(self.id)
if self.data is not None:
_str += "Data name : %s \n" % str(self.data.name)
_str += "Data ID : %s \n" % str(self.data.id)
_str += "Theories available: %s \n" % len(self.theory_list)
if self.theory_list:
for id, item in self.theory_list.items():
theory_data, theory_state = item
_str += "Theory name : %s \n" % str(theory_data.name)
_str += "Theory ID : %s \n" % str(id)
_str += "Theory info: \n"
_str += str(theory_data)
return _str
def clone(self):
obj = DataState(copy.deepcopy(self.data))
obj.parent = self.parent
obj.name = self.name
obj.path = self.path
obj.message = self.message
obj.id = self.id
for id, item in self.theory_list.items():
theory_data, theory_state = item
state = None
if theory_state is not None:
state = theory_state.clone()
obj.theory_list[id] = [copy.deepcopy(theory_data),
state]
return obj
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_data(self, data):
"""
"""
self.data = data
def get_data(self):
"""
"""
return self.data
def set_path(self, path):
"""
Set the path of the loaded data
"""
self.path = path
def get_path(self):
"""
return the path of the loaded data
"""
return self.path
def set_theory(self, theory_data, theory_state=None):
"""
"""
self.theory_list[theory_data.id] = [theory_data, theory_state]
data, state = list(self.theory_list.values())[0]
def get_theory(self):
return self.theory_list
def get_message(self):
"""
return message
"""
return self.message
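# Standalone usage sketch (editorial addition): DataState.clone() deep-copies
# the data and theory list, so the copy can be mutated independently.
if __name__ == "__main__":
    state = DataState(data=None)
    state.set_name("example")
    state_copy = state.clone()
    print(state_copy.get_name())  # "example"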
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/MainWindow/DataState.py
| 0.426799 | 0.239072 |
DataState.py
|
pypi
|
import math
import logging
import numpy as np
from sas.qtgui.Plotting.PlotterData import Data1D
PR_FIT_LABEL = r"$P_{fit}(r)$"
PR_LOADED_LABEL = r"$P_{loaded}(r)$"
IQ_DATA_LABEL = r"$I_{obs}(q)$"
IQ_FIT_LABEL = r"$I_{fit}(q)$"
IQ_SMEARED_LABEL = r"$I_{smeared}(q)$"
GROUP_ID_IQ_DATA = r"$I_{obs}(q)$"
GROUP_ID_PR_FIT = r"$P_{fit}(r)$"
PR_PLOT_PTS = 51
logger = logging.getLogger(__name__)
class InversionLogic(object):
"""
All the data-related logic. This class deals exclusively with Data1D/2D
No QStandardModelIndex here.
"""
def __init__(self, data=None):
self._data = data
self.data_is_loaded = False
if data is not None:
self.data_is_loaded = True
@property
def data(self):
return self._data
@data.setter
def data(self, value):
""" data setter """
self._data = value
self.data_is_loaded = (self._data is not None)
def isLoadedData(self):
""" accessor """
return self.data_is_loaded
def new1DPlot(self, out, pr, q=None):
"""
Create a new 1D data instance based on fitting results
"""
qtemp = pr.x
if q is not None:
qtemp = q
# Make a plot
maxq = max(qtemp)
minq = min(qtemp)
# Check for user min/max
if pr.q_min is not None and maxq >= pr.q_min >= minq:
minq = pr.q_min
if pr.q_max is not None and maxq >= pr.q_max >= minq:
maxq = pr.q_max
x = np.arange(minq, maxq, maxq / 301.0)
# Vectorised iq.
y = pr.iq(out, x)
err = np.sqrt(np.abs(y))
index = np.isnan(y)
if index.any():
y[index] = err[index] = 1.0
logger.info("Could not compute I(q) for q =", list((x[index])))
new_plot = Data1D(x, y)
new_plot.name = IQ_FIT_LABEL + f"[{self._data.name}]"
new_plot.xaxis("\\rm{Q}", 'A^{-1}')
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
title = "I(q)"
new_plot.title = title
# If we have a group ID, use it
if 'plot_group_id' in pr.info:
new_plot.group_id = pr.info["plot_group_id"]
new_plot.id = IQ_FIT_LABEL
# If we have used slit smearing, plot the smeared I(q) too
if pr.slit_width > 0 or pr.slit_height > 0:
x = np.arange(minq, maxq, maxq / 301.0)
# Vectorised iq_smeared.
y = pr.get_iq_smeared(out, x)
err = np.sqrt(np.abs(y))
index = np.isnan(y)
if index.any():
y[index] = err[index] = 1.0
logger.info("Could not compute smeared I(q) for q =", list((x[index])))
new_plot = Data1D(x, y)
new_plot.name = IQ_SMEARED_LABEL
new_plot.xaxis("\\rm{Q}", 'A^{-1}')
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
# If we have a group ID, use it
if 'plot_group_id' in pr.info:
new_plot.group_id = pr.info["plot_group_id"]
new_plot.id = IQ_SMEARED_LABEL
new_plot.title = title
new_plot.symbol = 'Line'
new_plot.hide_error = True
return new_plot
def newPRPlot(self, out, pr, cov=None):
"""
"""
# Show P(r)
x = np.arange(0.0, pr.d_max, pr.d_max / PR_PLOT_PTS)
if cov is None:
y = pr.pr(out, x)
new_plot = Data1D(x, y)
else:
(y, dy) = pr.pr_err(out, cov, x)
new_plot = Data1D(x, y, dy=dy)
new_plot.name = PR_FIT_LABEL + f"[{self._data.name}]"
new_plot.xaxis("\\rm{r}", 'A')
new_plot.yaxis("\\rm{P(r)} ", "cm^{-3}")
new_plot.title = "P(r) fit"
new_plot.id = PR_FIT_LABEL
new_plot.xtransform = "x"
new_plot.ytransform = "y"
new_plot.group_id = GROUP_ID_PR_FIT
return new_plot
def add_errors(self, sigma=0.05):
"""
Adds errors to the data set if they are not available.
Uses $\Delta y = \sigma |y|$.
"""
self._data.dy = sigma * np.fabs(self._data.y)
def computeDataRange(self):
"""
Wrapper for calculating the data range based on local dataset
"""
return self.computeRangeFromData(self.data)
def computeRangeFromData(self, data):
"""
Compute the minimum and maximum Q range of the data.
"""
qmin, qmax = None, None
if isinstance(data, Data1D):
try:
qmin = min(data.x)
qmax = max(data.x)
except (ValueError, TypeError):
msg = "Unable to find min/max/length of \n data named %s" % \
self.data.filename
raise ValueError(msg)
else:
qmin = 0
try:
x = max(np.fabs(data.xmin), np.fabs(data.xmax))
y = max(np.fabs(data.ymin), np.fabs(data.ymax))
except (ValueError, TypeError):
msg = "Unable to find min/max of \n data named %s" % \
self.data.filename
raise ValueError(msg)
qmax = np.sqrt(x * x + y * y)
return qmin, qmax
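# Standalone sketch (editorial addition): new1DPlot() evaluates the fitted
# I(q) on a grid of roughly 300 points built with np.arange.
if __name__ == "__main__":
    minq, maxq = 1e-3, 0.3
    q = np.arange(minq, maxq, maxq / 301.0)
    print(len(q), q[0], q[-1])  # ~300 points spanning the fitted Q range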
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Perspectives/Inversion/InversionLogic.py
| 0.593374 | 0.492981 |
InversionLogic.py
|
pypi
|
import numpy as np
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Plotting.PlotterData import Data2D
from sas.sascalc.dataloader.data_info import Detector
from sas.sascalc.dataloader.data_info import Source
class FittingLogic(object):
"""
All the data-related logic. This class deals exclusively with Data1D/2D
No QStandardModelIndex here.
"""
def __init__(self, data=None):
self._data = data
self.data_is_loaded = False
# dq data presence in the dataset
self.dq_flag = False
# di data presence in the dataset
self.di_flag = False
if data is not None:
self.data_is_loaded = True
self.setDataProperties()
@property
def data(self):
return self._data
@data.setter
def data(self, value):
""" data setter """
self._data = value
self.data_is_loaded = True
self.setDataProperties()
def isLoadedData(self):
""" accessor """
return self.data_is_loaded
def setDataProperties(self):
"""
Analyze data and set up some properties important for
the Presentation layer
"""
if self._data.__class__.__name__ == "Data2D":
if self._data.err_data is not None and np.any(self._data.err_data):
self.di_flag = True
if self._data.dqx_data is not None and np.any(self._data.dqx_data):
self.dq_flag = True
else:
if self._data.dy is not None and np.any(self._data.dy):
self.di_flag = True
if self._data.dx is not None and np.any(self._data.dx):
self.dq_flag = True
elif self._data.dxl is not None and np.any(self._data.dxl):
self.dq_flag = True
def createDefault1dData(self, interval, tab_id=0):
"""
Create default data for fitting perspective
Only when the page is on theory mode.
"""
self._data = Data1D(x=interval)
self._data.xaxis('\\rm{Q}', "A^{-1}")
self._data.yaxis('\\rm{Intensity}', "cm^{-1}")
self._data.is_data = False
self._data.id = str(tab_id) + " data"
self._data.group_id = str(tab_id) + " Model1D"
def createDefault2dData(self, qmax, qstep, tab_id=0):
"""
Create 2D data by default
Only when the page is on theory mode.
"""
self._data = Data2D()
self._data.xaxis('\\rm{Q_{x}}', 'A^{-1}')
self._data.yaxis('\\rm{Q_{y}}', 'A^{-1}')
self._data.is_data = False
self._data.id = str(tab_id) + " data"
self._data.group_id = str(tab_id) + " Model2D"
# Default detector
self._data.detector.append(Detector())
index = len(self._data.detector) - 1
self._data.detector[index].distance = 8000 # mm
self._data.source.wavelength = 6 # A
self._data.detector[index].pixel_size.x = 5 # mm
self._data.detector[index].pixel_size.y = 5 # mm
self._data.detector[index].beam_center.x = qmax
self._data.detector[index].beam_center.y = qmax
# Theory default: assume the beam center is located
# at the center of the square detector
xmax = qmax
xmin = -qmax
ymax = qmax
ymin = -qmax
x = np.linspace(start=xmin, stop=xmax, num=qstep, endpoint=True)
y = np.linspace(start=ymin, stop=ymax, num=qstep, endpoint=True)
# Use data info instead
new_x = np.tile(x, (len(y), 1))
new_y = np.tile(y, (len(x), 1))
new_y = new_y.swapaxes(0, 1)
# all data required in 1d array
qx_data = new_x.flatten()
qy_data = new_y.flatten()
q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data)
# set all True (standing for unmasked) as default
mask = np.ones(len(qx_data), dtype=bool)
# calculate the range of qx and qy: this way,
# it is a little more independent
# store x and y bin centers in q space
x_bins = x
y_bins = y
self._data.source = Source()
self._data.data = np.ones(len(mask))
self._data.err_data = np.ones(len(mask))
self._data.qx_data = qx_data
self._data.qy_data = qy_data
self._data.q_data = q_data
self._data.mask = mask
self._data.x_bins = x_bins
self._data.y_bins = y_bins
# max and min taking account of the bin sizes
self._data.xmin = xmin
self._data.xmax = xmax
self._data.ymin = ymin
self._data.ymax = ymax
def _create1DPlot(self, tab_id, x, y, model, data, component=None):
"""
For internal use: create a new 1D data instance based on fitting results.
'component' is a string indicating the model component, e.g. "P(Q)"
"""
# Create the new plot
new_plot = Data1D(x=x, y=y)
new_plot.is_data = False
new_plot.dy = np.zeros(len(y))
_yaxis, _yunit = data.get_yaxis()
_xaxis, _xunit = data.get_xaxis()
new_plot.group_id = data.group_id
new_plot.id = str(tab_id) + " " + ("[" + component + "] " if component else "") + model.id
# use data.filename for data, use model.id for theory
id_str = data.name if data.name else model.id
new_plot.name = model.name + ((" " + component) if component else "") + " [" + id_str + "]"
new_plot.title = new_plot.name
new_plot.xaxis(_xaxis, _xunit)
new_plot.yaxis(_yaxis, _yunit)
if component is not None:
new_plot.plot_role = Data1D.ROLE_DELETABLE #deletable
return new_plot
def new1DPlot(self, return_data, tab_id):
"""
Create a new 1D data instance based on fitting results
"""
return self._create1DPlot(tab_id, return_data['x'], return_data['y'],
return_data['model'], return_data['data'])
def new2DPlot(self, return_data):
"""
Create a new 2D data instance based on fitting results
"""
image = return_data['image']
data = return_data['data']
model = return_data['model']
image = np.nan_to_num(image)  # nan_to_num returns a copy; assign it back
new_plot = Data2D(image=image, err_image=data.err_data)
new_plot.name = model.name + '2d'
new_plot.title = "Analytical model 2D "
new_plot.id = str(return_data['page_id']) + " " + data.name
new_plot.group_id = str(return_data['page_id']) + " Model2D"
new_plot.detector = data.detector
new_plot.source = data.source
new_plot.is_data = False
new_plot.qx_data = data.qx_data
new_plot.qy_data = data.qy_data
new_plot.q_data = data.q_data
new_plot.mask = data.mask
# Plot boundaries
new_plot.ymin = data.ymin
new_plot.ymax = data.ymax
new_plot.xmin = data.xmin
new_plot.xmax = data.xmax
title = data.title
new_plot.is_data = False
if data.is_data:
data_name = str(data.name)
else:
data_name = str(model.__class__.__name__) + '2d'
if len(title) > 1:
new_plot.title = "Model2D for %s " % model.name + data_name
new_plot.name = model.name + " [" + \
data_name + "]"
return new_plot
def new1DProductPlots(self, return_data, tab_id):
"""
If return_data contains separated P(Q) and/or S(Q) data, create 1D plots for each and return as the tuple
(pq_plot, sq_plot). If either are unavailable, the corresponding plot is None.
"""
plots = []
for name, result in return_data['intermediate_results'].items():
if isinstance(result, tuple) and len(result) > 1:
result = result[1]
if not isinstance(result, np.ndarray):
continue
plots.append(self._create1DPlot(tab_id, return_data['x'], result,
return_data['model'], return_data['data'],
component=name))
return plots
def getScalarIntermediateResults(self, return_data):
"""
Returns a dict of scalar-only intermediate results from the return data.
"""
res = {}
for name, int_res in return_data["intermediate_results"].items():
if isinstance(int_res, np.ndarray):
continue
res[name] = int_res
return res
def computeDataRange(self):
"""
Wrapper for calculating the data range based on local dataset
"""
return self.computeRangeFromData(self.data)
def computeRangeFromData(self, data):
"""
Compute the minimum and maximum q-range of the data
and return them together with the number of points in the data
"""
qmin, qmax, npts = None, None, None
if isinstance(data, Data1D):
try:
qmin = min(data.x)
qmax = max(data.x)
npts = len(data.x)
except (ValueError, TypeError):
msg = "Unable to find min/max/length of \n data named %s" % \
self.data.filename
raise ValueError(msg)
else:
qmin = 0
try:
x = max(np.fabs(data.xmin), np.fabs(data.xmax))
y = max(np.fabs(data.ymin), np.fabs(data.ymax))
except (ValueError, TypeError):
msg = "Unable to find min/max of \n data named %s" % \
self.data.filename
raise ValueError(msg)
qmax = np.sqrt(x * x + y * y)
npts = len(data.data)
return qmin, qmax, npts
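# Illustrative sketch (not part of the original module): for 2D data the
# largest |q| sits at the corner of the detector image, hence
# qmax = sqrt(x**2 + y**2) with x, y the largest absolute axis limits.
def _example_2d_qrange():
    import numpy as np
    xmin, xmax = -0.05, 0.05              # hypothetical qx limits
    ymin, ymax = -0.05, 0.05              # hypothetical qy limits
    x = max(np.fabs(xmin), np.fabs(xmax))
    y = max(np.fabs(ymin), np.fabs(ymax))
    return 0, np.sqrt(x * x + y * y)      # (qmin, qmax) ~ (0, 0.0707)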
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Perspectives/Fitting/FittingLogic.py
| 0.630571 | 0.405861 |
FittingLogic.py
|
pypi
|
import numpy
import copy
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from bumps import options
from bumps import fitters
import sas.qtgui.Utilities.LocalConfig as LocalConfig
import sas.qtgui.Utilities.ObjectLibrary as ObjectLibrary
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Perspectives.Fitting.Constraint import Constraint
from sas.qtgui.Perspectives.Fitting.FittingWidget import FittingWidget
from sas.qtgui.Perspectives.Fitting.ConstraintWidget import ConstraintWidget
from sas.qtgui.Perspectives.Fitting.FittingOptions import FittingOptions
from sas.qtgui.Perspectives.Fitting.GPUOptions import GPUOptions
class FittingWindow(QtWidgets.QTabWidget):
"""
"""
tabsModifiedSignal = QtCore.pyqtSignal()
fittingStartedSignal = QtCore.pyqtSignal(list)
fittingStoppedSignal = QtCore.pyqtSignal(list)
name = "Fitting" # For displaying in the combo box in DataExplorer
ext = "fitv" # Extension used for saving analyses
def __init__(self, parent=None, data=None):
super(FittingWindow, self).__init__()
self.parent = parent
self._data = data
# List of active fits
self.tabs = []
# Max index for adding new, non-clashing tab names
self.maxIndex = 1
# The default optimizer
self.optimizer = 'Levenberg-Marquardt'
# Dataset index -> Fitting tab mapping
self.dataToFitTab = {}
# The tabs need to be closeable
self.setTabsClosable(True)
# The tabs need to be movable
self.setMovable(True)
self.communicate = self.parent.communicator()
# Initialize the first tab
self.addFit(None)
# Deal with signals
self.tabCloseRequested.connect(self.tabCloses)
self.communicate.dataDeletedSignal.connect(self.dataDeleted)
self.fittingStartedSignal.connect(self.onFittingStarted)
self.fittingStoppedSignal.connect(self.onFittingStopped)
self.communicate.copyFitParamsSignal.connect(self.onParamCopy)
self.communicate.pasteFitParamsSignal.connect(self.onParamPaste)
self.communicate.copyExcelFitParamsSignal.connect(self.onExcelCopy)
self.communicate.copyLatexFitParamsSignal.connect(self.onLatexCopy)
self.communicate.SaveFitParamsSignal.connect(self.onParamSave)
# Perspective window not allowed to close by default
self._allow_close = False
# Fit options - uniform for all tabs
self.fit_options = options.FIT_CONFIG
self.fit_options_widget = FittingOptions(self, config=self.fit_options)
self.fit_options.selected_id = fitters.MPFit.id
# Listen to GUI Manager signal updating fit options
self.fit_options_widget.fit_option_changed.connect(self.onFittingOptionsChange)
# GPU Options
self.gpu_options_widget = GPUOptions(self)
self.updateWindowTitle()
# Add new tab mini-button
self.plusButton = QtWidgets.QToolButton(self)
self.plusButton.setText("+")
self.setCornerWidget(self.plusButton)
self.plusButton.setToolTip("Add a new Fit Page")
self.plusButton.clicked.connect(lambda: self.addFit(None))
def updateWindowTitle(self):
"""
Update the window title with the current optimizer name
"""
self.optimizer = self.fit_options.selected_name
self.setWindowTitle('Fit panel - Active Fitting Optimizer: %s' % self.optimizer)
def setClosable(self, value=True):
"""
Allow outsiders to close this widget
"""
assert isinstance(value, bool)
self._allow_close = value
def onParamCopy(self):
self.currentTab.onCopyToClipboard("")
def onParamPaste(self):
self.currentTab.onParameterPaste()
def onExcelCopy(self):
self.currentTab.onCopyToClipboard("Excel")
def onLatexCopy(self):
self.currentTab.onCopyToClipboard("Latex")
def serializeAll(self):
return self.serializeAllFitpage()
def serializeAllFitpage(self):
# serialize all active fitpages and return
# a dictionary: {data_id: fitpage_state}
state = {}
for tab in self.tabs:
tab_state = self.getSerializedFitpage(tab)
for key, value in tab_state.items():
if key in state:
state[key].update(value)
else:
state[key] = value
return state
def serializeCurrentPage(self):
# serialize current(active) fitpage
return self.getSerializedFitpage(self.currentTab)
def getSerializedFitpage(self, tab):
"""
Serialize the requested fit tab and return its state dictionary
"""
state = {}
fitpage_state = tab.getFitPage()
fitpage_state += tab.getFitModel()
# put the text into dictionary
line_dict = {}
for line in fitpage_state:
if len(line) > 1:
line_dict[line[0]] = line[1:]
if 'data_id' not in line_dict:
    return state
id = line_dict['data_id'][0]
if not isinstance(id, list):
id = [id]
for i in id:
if 'is_constraint' in line_dict.keys():
state[i] = line_dict
elif i in state and 'fit_params' in state[i]:
    state[i]['fit_params'].append(line_dict)
else:
state[i] = {'fit_params': [line_dict]}
return state
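# Illustrative sketch (assumed row layout, not part of the original module):
# getFitPage()/getFitModel() yield rows shaped like [key, value, ...]; the
# method above folds them into a per-data_id state dictionary.
def _example_fitpage_rows_to_state():
    rows = [
        ["data_id", "abc-123"],      # hypothetical dataset id
        ["fit_page", "FitPage2"],
        ["model_name", "sphere"],
    ]
    line_dict = {row[0]: row[1:] for row in rows if len(row) > 1}
    data_id = line_dict["data_id"][0]
    return {data_id: {"fit_params": [line_dict]}}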
def currentTabDataId(self):
"""
Returns the data IDs of all datasets shown in the current tab
"""
tab_id = []
if not self.currentTab.data:
return tab_id
for item in self.currentTab.all_data:
data = GuiUtils.dataFromItem(item)
tab_id.append(data.id)
return tab_id
def updateFromParameters(self, parameters):
"""
Pass the updated parameters to the current fit page
"""
self.currentTab.createPageForParameters(parameters)
def updateFromConstraints(self, constraint_dict):
"""
Updates all tabs with constraints present in *constraint_dict*, where
*constraint_dict* keys are the fit page name, and the value is a
list of constraints. A constraint is represented by a list [value,
param, value_ex, validate, function] of attributes of a Constraint
object
"""
for fit_page_name, constraint_list in constraint_dict.items():
tab = self.getTabByName(fit_page_name)
for constraint_param in constraint_list:
if constraint_param is not None and len(constraint_param) == 5:
constraint = Constraint()
constraint.value = constraint_param[0]
constraint.func = constraint_param[4]
constraint.param = constraint_param[1]
constraint.value_ex = constraint_param[2]
constraint.validate = constraint_param[3]
tab.addConstraintToRow(constraint=constraint,
row=tab.getRowFromName(
constraint_param[1]))
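# Illustrative sketch (hypothetical names, not part of the original module):
# the constraint_dict layout consumed by updateFromConstraints. Each row is
# [value, param, value_ex, validate, function], mirroring a Constraint.
def _example_constraint_dict():
    return {
        "M1": [
            [1.0, "scale", "M1.scale", True, "M2.scale + 2"],
        ],
    }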
def onParamSave(self):
self.currentTab.onCopyToClipboard("Save")
def closeEvent(self, event):
"""
Overwrite QDialog close method to allow for custom widget close
"""
# Invoke fit page events
if self._allow_close:
# reset the closability flag
self.setClosable(value=False)
# Tell the MdiArea to close the container if it is visible
if self.parentWidget():
self.parentWidget().close()
event.accept()
else:
# Maybe we should just minimize
self.setWindowState(QtCore.Qt.WindowMinimized)
event.ignore()
def addFit(self, data, is_batch=False, tab_index=None):
"""
Add a new tab for passed data
"""
if tab_index is None:
tab_index = self.maxIndex
else:
self.maxIndex = tab_index
tab = FittingWidget(parent=self.parent, data=data, tab_id=tab_index)
tab.is_batch_fitting = is_batch
# Add this tab to the object library so it can be retrieved by scripting/jupyter
tab_name = self.getTabName(is_batch=is_batch)
ObjectLibrary.addObject(tab_name, tab)
self.tabs.append(tab)
if data:
self.updateFitDict(data, tab_name)
self.maxIndex = max([tab.tab_id for tab in self.tabs], default=0) + 1
icon = QtGui.QIcon()
if is_batch:
icon.addPixmap(QtGui.QPixmap("src/sas/qtgui/images/icons/layers.svg"))
self.addTab(tab, icon, tab_name)
# Show the new tab
self.setCurrentWidget(tab)
# Notify listeners
self.tabsModifiedSignal.emit()
def addConstraintTab(self):
"""
Add a new C&S fitting tab
"""
tabs = [isinstance(tab, ConstraintWidget) for tab in self.tabs]
if any(tabs):
# We already have a C&S tab: show it
self.setCurrentIndex(tabs.index(True))
return
tab = ConstraintWidget(parent=self)
# Add this tab to the object library so it can be retrieved by scripting/jupyter
tab_name = self.getCSTabName() # TODO update the tab name scheme
ObjectLibrary.addObject(tab_name, tab)
self.tabs.append(tab)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("src/sas/qtgui/images/icons/link.svg"))
self.addTab(tab, icon, tab_name)
# This will be the last tab, so set the index accordingly
self.setCurrentIndex(self.count()-1)
def updateFitDict(self, item_key, tab_name):
"""
Create a list if none exists and append if there's already a list
"""
item_key_str = str(item_key)
if item_key_str in list(self.dataToFitTab.keys()):
self.dataToFitTab[item_key_str].append(tab_name)
else:
self.dataToFitTab[item_key_str] = [tab_name]
def getTabName(self, is_batch=False):
"""
Get the new tab name, based on the number of fitting tabs so far
"""
page_name = "BatchPage" if is_batch else "FitPage"
page_name = page_name + str(self.maxIndex)
return page_name
def getCSTabName(self):
"""
Get the fixed name used for the Constrained & Simultaneous fit tab
"""
page_name = "Const. & Simul. Fit"
return page_name
def closeTabByIndex(self, index):
"""
Close/delete a tab with the given index.
No checks on validity of the index.
"""
try:
ObjectLibrary.deleteObjectByRef(self.tabs[index])
self.removeTab(index)
del self.tabs[index]
self.tabsModifiedSignal.emit()
except IndexError:
# The tab might have already been deleted previously
pass
def resetTab(self, index):
"""
Adds a new tab and removes the last tab
as a way of resetting the fit tabs
"""
# If the tab has no data, there is nothing to reset
if index < len(self.tabs) and not self.tabs[index].data:
    return
# Add a new, empty tab
self.addFit(None)
# Remove the previous last tab
self.tabCloses(index)
def tabCloses(self, index):
"""
Update local bookkeeping on tab close
"""
# don't remove the last tab
if len(self.tabs) <= 1:
self.resetTab(index)
return
self.closeTabByIndex(index)
def closeTabByName(self, tab_name):
"""
Given name of the fitting tab - close it
"""
for tab_index in range(len(self.tabs)):
    if self.tabText(tab_index) == tab_name:
        self.tabCloses(tab_index)
        return
def dataDeleted(self, index_list):
"""
Delete fit tabs referencing given data
"""
if not index_list or not self.dataToFitTab:
return
for index_to_delete in index_list:
index_to_delete_str = str(index_to_delete)
orig_dict = copy.deepcopy(self.dataToFitTab)
for tab_key in orig_dict.keys():
if index_to_delete_str in tab_key:
for tab_name in orig_dict[tab_key]:
self.closeTabByName(tab_name)
self.dataToFitTab.pop(tab_key)
def allowBatch(self):
"""
Tell the caller that we accept multiple data instances
"""
return True
def allowSwap(self):
"""
Tell the caller that data swapping is supported
"""
return True
def isSerializable(self):
"""
Tell the caller that this perspective writes its state
"""
return True
def setData(self, data_item=None, is_batch=False, tab_index=None):
"""
Assign new dataset to the fitting instance
Obtain a QStandardItem object and dissect it to get Data1D/2D
Pass it over to the calculator
"""
assert data_item is not None
if not isinstance(data_item, list):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if not isinstance(data_item[0], QtGui.QStandardItem):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if is_batch:
# Just create a new fit tab. No empty batchFit tabs
self.addFit(data_item, is_batch=is_batch)
return
# is_batch was handled above, so data_item is always a plain list here
items = data_item
for data in items:
# Find the first unassigned tab.
# If none, open a new tab.
available_tabs = [tab.acceptsData() for tab in self.tabs]
tab_ids = [tab.tab_id for tab in self.tabs]
if tab_index is not None:
if tab_index not in tab_ids:
self.addFit(data, is_batch=is_batch, tab_index=tab_index)
else:
self.setCurrentIndex(tab_index-1)
self.swapData(data)
return
if numpy.any(available_tabs):
first_good_tab = available_tabs.index(True)
self.tabs[first_good_tab].data = data
tab_name = str(self.tabText(first_good_tab))
self.updateFitDict(data, tab_name)
else:
self.addFit(data, is_batch=is_batch)
def swapData(self, data):
"""
Replace the data from the current fitting tab
"""
if not isinstance(self.currentWidget(), FittingWidget):
msg = "Current tab is not a fitting widget"
raise TypeError(msg)
if not isinstance(data, QtGui.QStandardItem):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if self.currentTab.is_batch_fitting:
msg = "Data in Batch Fitting cannot be swapped"
raise RuntimeError(msg)
self.currentTab.data = data
tab_name = str(self.tabText(self.currentIndex()))
self.updateFitDict(data, tab_name)
def onFittingOptionsChange(self, fit_engine):
"""
React to the fitting algorithm change by modifying window title
"""
fitter = [f.id for f in options.FITTERS if f.name == str(fit_engine)][0]
# set the optimizer
self.fit_options.selected_id = str(fitter)
# Update the title
self.updateWindowTitle()
def onFittingStarted(self, tabs_for_fitting=None):
"""
Notify tabs listed in tabs_for_fitting
that the fitting thread started
"""
assert(isinstance(tabs_for_fitting, list))
assert(len(tabs_for_fitting)>0)
for tab_object in self.tabs:
if not isinstance(tab_object, FittingWidget):
continue
page_name = "Page%s"%tab_object.tab_id
if any([page_name in tab for tab in tabs_for_fitting]):
tab_object.disableInteractiveElements()
def onFittingStopped(self, tabs_for_fitting=None):
"""
Notify tabs listed in tabs_for_fitting
that the fitting thread stopped
"""
assert(isinstance(tabs_for_fitting, list))
assert(len(tabs_for_fitting)>0)
for tab_object in self.tabs:
if not isinstance(tab_object, FittingWidget):
continue
page_name = "Page%s"%tab_object.tab_id
if any([page_name in tab for tab in tabs_for_fitting]):
tab_object.enableInteractiveElements()
def getCurrentStateAsXml(self):
"""
Returns an XML version of the current state
"""
state = {}
for tab in self.tabs:
pass
return state
@property
def currentTab(self):
"""
Returns the tab widget currently shown
"""
return self.currentWidget()
def getFitTabs(self):
"""
Returns the list of fitting tabs
"""
return [tab for tab in self.tabs if isinstance(tab, FittingWidget)]
def getActiveConstraintList(self):
"""
Returns a list of the constraints for all fitting tabs. Constraints
are a tuple of strings (parameter, expression) e.g. ('M1.scale',
'M2.scale + 2')
"""
constraints = []
for tab in self.getFitTabs():
tab_name = tab.modelName()
tab_constraints = tab.getConstraintsForModel()
constraints.extend((tab_name + "." + par, expr)
for par, expr in tab_constraints)
return constraints
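# Illustrative sketch (hypothetical tab data, not part of the original
# module): how per-tab (param, expr) pairs become fully qualified names.
def _example_qualified_constraints():
    per_tab = {"M1": [("scale", "M2.scale + 2")]}
    constraints = []
    for tab_name, pairs in per_tab.items():
        constraints.extend((tab_name + "." + par, expr) for par, expr in pairs)
    return constraints   # [('M1.scale', 'M2.scale + 2')]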
def getSymbolDictForConstraints(self):
"""
Returns a dictionary containing all the symbols in all constrained tabs
and their values.
"""
symbol_dict = {}
for tab in self.getFitTabs():
symbol_dict.update(tab.getSymbolDict())
return symbol_dict
def getConstraintTab(self):
"""
Returns the constraint tab, or None if no constraint tab is active
"""
return next(
    (tab for tab in self.tabs if isinstance(tab, ConstraintWidget)),
    None)
def getTabByName(self, name):
"""
Returns the tab whose model name matches *name*
"""
assert isinstance(name, str)
for tab in self.tabs:
if tab.modelName() == name:
return tab
return None
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Perspectives/Fitting/FittingPerspective.py
| 0.527803 | 0.164215 |
FittingPerspective.py
|
pypi
|
import copy
import numpy as np
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from sas.sascalc.fit.qsmearing import smear_selection, PySmear, PySmear2D
from sas.qtgui.Plotting.PlotterData import Data1D
from sas.qtgui.Plotting.PlotterData import Data2D
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sasmodels.resolution import Slit1D, Pinhole1D
from sasmodels.sesans import SesansTransform
# Local UI
from sas.qtgui.Perspectives.Fitting.UI.SmearingWidgetUI import Ui_SmearingWidgetUI
class DataWidgetMapper(QtWidgets.QDataWidgetMapper):
"""
Custom version of the standard QDataWidgetMapper allowing for proper
response to index change in comboboxes
"""
def addMapping(self, widget, section, propertyName=None):
if propertyName is None:
super(DataWidgetMapper, self).addMapping(widget, section)
else:
super(DataWidgetMapper, self).addMapping(widget, section, propertyName)
if isinstance(widget, QtWidgets.QComboBox):
delegate = self.itemDelegate()
widget.currentIndexChanged.connect(lambda: delegate.commitData.emit(widget))
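# Usage sketch (hypothetical model and combobox, not part of the original
# module): with the override above, a combobox selection is committed to the
# model immediately; a stock QDataWidgetMapper only commits on focus changes.
def _example_mapper_usage(model, combo):
    mapper = DataWidgetMapper()
    mapper.setModel(model)
    mapper.setOrientation(QtCore.Qt.Vertical)
    mapper.addMapping(combo, 0)   # model row 0 backs the combobox
    mapper.toFirst()
    return mapper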
SMEARING_1D = ["Custom Pinhole Smear", "Custom Slit Smear"]
SMEARING_2D = ["Custom Pinhole Smear"]
SMEARING_QD = "Use dQ Data"
MODEL = [
'SMEARING',
'PINHOLE_MIN',
'PINHOLE_MAX',
'ACCURACY']
ACCURACY_DICT = {'Low': 'low',
                 'Medium': 'med',
                 'High': 'high',
                 'Extra high': 'xhigh'}
DEFAULT_PINHOLE_UP = 0.0
DEFAULT_PINHOLE_DOWN = 0.0
class SmearingWidget(QtWidgets.QWidget, Ui_SmearingWidgetUI):
smearingChangedSignal = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(SmearingWidget, self).__init__()
self.setupUi(self)
# Local model for holding data
self.model = None
# Mapper for model update
self.mapper = None
# Data from the widget
self.data = None
self.current_smearer = None
self.kernel_model = None
# dQ data variables
self.smear_type = None
self.dq_l = None
self.dq_r = None
# current pinhole/slit values
self.pinhole = 0.0
self.slit_height = 0.0
self.slit_width = 0.0
# current accuracy option
self.accuracy = ""
# Let only floats in the line edits
self.txtSmearDown.setValidator(GuiUtils.DoubleValidator())
self.txtSmearUp.setValidator(GuiUtils.DoubleValidator())
# Attach slots
self.cbSmearing.currentIndexChanged.connect(self.onIndexChange)
self.cbSmearing.setCurrentIndex(0)
self.txtSmearUp.setText(str(DEFAULT_PINHOLE_UP))
self.txtSmearDown.setText(str(DEFAULT_PINHOLE_DOWN))
self.initModel()
self.initMapper()
def initModel(self):
"""
Initialize the state
"""
self.model = QtGui.QStandardItemModel()
for model_item in range(len(MODEL)):
self.model.setItem(model_item, QtGui.QStandardItem())
# Attach slot
self.model.dataChanged.connect(self.onModelChange)
def initMapper(self):
"""
Initialize model item <-> UI element mapping
"""
self.mapper = DataWidgetMapper(self)
self.mapper.setModel(self.model)
self.mapper.setOrientation(QtCore.Qt.Vertical)
self.mapper.addMapping(self.txtSmearUp, MODEL.index('PINHOLE_MIN'))
self.mapper.addMapping(self.txtSmearDown, MODEL.index('PINHOLE_MAX'))
self.mapper.addMapping(self.cbAccuracy, MODEL.index('ACCURACY'))
self.mapper.toFirst()
def updateData(self, data=None):
"""
Update control elements based on data and model passed
"""
# retain the combobox index
old_index = self.cbSmearing.currentIndex()
self.cbSmearing.clear()
self.cbSmearing.addItem("None")
self.gAccuracy.setVisible(False)
self.data = data
if data is None:
self.setElementsVisibility(False)
model = self.kernel_model
self.updateKernelModel(model, keep_order=True, old_index=old_index)
def updateKernelModel(self, kernel_model=None, keep_order=False, old_index=None):
"""
Update the model
"""
self.kernel_model = kernel_model
# keep the original cbSmearing value, if already set
index_to_show = self.cbSmearing.currentIndex()
if old_index is not None:
index_to_show = old_index
self.cbSmearing.blockSignals(True)
self.cbSmearing.clear()
self.cbSmearing.addItem("None")
if self.data is None:
self.setElementsVisibility(False)
return
# Find out if data has dQ
self.current_smearer = smear_selection(self.data, self.kernel_model)
self.setSmearInfo()
if self.smear_type is not None:
self.cbSmearing.addItem(SMEARING_QD)
index_to_show = 1 if keep_order else index_to_show
if self.kernel_model is None:
# No model defined yet - just use the data file's smearing, if any
self.cbSmearing.blockSignals(False)
self.cbSmearing.setCurrentIndex(index_to_show)
return
if isinstance(self.data, Data1D):
self.cbSmearing.addItems(SMEARING_1D)
else:
self.cbSmearing.addItems(SMEARING_2D)
self.cbSmearing.blockSignals(False)
self.cbSmearing.setCurrentIndex(index_to_show)
def smearer(self):
""" Returns the current smearer """
return self.current_smearer
def onIndexChange(self, index):
"""
Callback for smearing combobox index change
"""
text = self.cbSmearing.currentText()
if text == 'None':
self.setElementsVisibility(False)
self.current_smearer = None
elif text == "Use dQ Data":
self.setElementsVisibility(True)
self.setDQLabels()
self.onDQSmear()
elif text == "Custom Pinhole Smear":
self.setElementsVisibility(True)
self.setPinholeLabels()
self.onPinholeSmear()
elif text == "Custom Slit Smear":
self.setElementsVisibility(True)
self.setSlitLabels()
self.onSlitSmear()
self.smearingChangedSignal.emit()
def onModelChange(self):
"""
Respond to model change by notifying any listeners
"""
# Recalculate the smearing
index = self.cbSmearing.currentIndex()
# update the backup values based on model choice
smearing, accuracy, d_down, d_up = self.state()
# don't save the state if dQ Data
if smearing == "Custom Pinhole Smear":
self.pinhole = d_up
elif smearing == 'Custom Slit Smear':
self.slit_height = d_up
self.slit_width = d_down
# check changes in accuracy
if self.accuracy != accuracy:
self.accuracy = accuracy
if accuracy == 'High' or accuracy == 'Extra high':
QtWidgets.QMessageBox.information(self, "Accuracy Warning",
"Higher accuracy is very expensive, \nso fitting can be very slow!")
self.onIndexChange(index)
def setElementsVisibility(self, visible):
"""
Labels and linedits visibility control
"""
self.lblSmearDown.setVisible(visible)
self.lblSmearUp.setVisible(visible)
self.txtSmearDown.setVisible(visible)
self.txtSmearUp.setVisible(visible)
self.lblUnitUp.setVisible(visible)
self.lblUnitDown.setVisible(visible)
self.setAccuracyVisibility()
def setAccuracyVisibility(self):
"""
Accuracy combobox visibility
"""
if isinstance(self.data, Data2D) and self.cbSmearing.currentIndex() >= 1:
self.gAccuracy.setVisible(True)
else:
self.gAccuracy.setVisible(False)
def setPinholeLabels(self):
"""
Use pinhole labels
"""
self.txtSmearDown.setVisible(False)
self.lblSmearDown.setText('')
self.lblUnitDown.setText('')
self.lblSmearUp.setText('<html><head/><body><p>dQ/Q</p></body></html>')
self.lblUnitUp.setText('%')
self.txtSmearUp.setText(str(self.pinhole))
self.txtSmearDown.setEnabled(True)
self.txtSmearUp.setEnabled(True)
def setSlitLabels(self):
"""
Use slit labels
"""
self.lblSmearUp.setText('Slit height')
self.lblSmearDown.setText('Slit width')
self.lblUnitUp.setText('<html><head/><body><p>Å<span style=" vertical-align:super;">-1</span></p></body></html>')
self.lblUnitDown.setText('<html><head/><body><p>Å<span style=" vertical-align:super;">-1</span></p></body></html>')
self.txtSmearUp.setText(str(self.slit_height))
self.txtSmearDown.setText(str(self.slit_width))
self.txtSmearDown.setEnabled(True)
self.txtSmearUp.setEnabled(True)
def setDQLabels(self):
"""
Use the dQ-data labels appropriate to the detected smear type
"""
if self.smear_type == "Pinhole":
text_down = '<html><head/><body><p>[dQ/Q]<span style=" vertical-align:sub;">max</span></p></body></html>'
text_up = '<html><head/><body><p>[dQ/Q]<span style=" vertical-align:sub;">min</span></p></body></html>'
text_unit = '%'
elif self.smear_type == "Slit":
text_down = '<html><head/><body><p>Slit width</p></body></html>'
text_up = '<html><head/><body><p>Slit height</p></body></html>'
text_unit = '<html><head/><body><p>Å<span style=" vertical-align:super;">-1</span></p></body></html>'
else:
text_unit = '%'
text_up = '<html><head/><body><p>‹dQ/Q›<span style=" vertical-align:sub;">r</span></p></body></html>'
text_down = '<html><head/><body><p>‹dQ/Q›<span style=" vertical-align:sub;">φ</span></p></body></html>'
self.lblSmearDown.setText(text_down)
self.lblSmearUp.setText(text_up)
self.lblUnitUp.setText(text_unit)
self.lblUnitDown.setText(text_unit)
self.txtSmearDown.setText(str(self.dq_r))
self.txtSmearUp.setText(str(self.dq_l))
self.txtSmearDown.setEnabled(False)
self.txtSmearUp.setEnabled(False)
def state(self):
"""
Returns current state of controls
"""
smearing = self.cbSmearing.currentText()
accuracy = ""
d_down = None
d_up = None
if smearing != "None":
accuracy = str(self.model.item(MODEL.index('ACCURACY')).text())
try:
d_down = float(self.txtSmearDown.text())
except ValueError:
d_down = 0.0
try:
d_up = float(self.txtSmearUp.text())
except ValueError:
d_up = 0.0
return (smearing, accuracy, d_down, d_up)
def setState(self, smearing, accuracy, d_down, d_up):
"""
Sets new values for the controls
"""
# Update the model -> controls update automatically
if accuracy is not None:
self.model.item(MODEL.index('ACCURACY')).setText(accuracy)
if d_down is not None:
self.model.item(MODEL.index('PINHOLE_MIN')).setText(str(d_down))
if d_up is not None:
self.model.item(MODEL.index('PINHOLE_MAX')).setText(str(d_up))
def onDQSmear(self):
"""
Create a custom dQ smear object that will change the way residuals
are computed when fitting
"""
# resolution information already in data.dx (if 1D) or
# data.dqx_data & data.dqy_data (if 2D),
# so only need to set accuracy for 2D
_, accuracy, _, _ = self.state()
self.current_smearer = smear_selection(self.data, self.kernel_model)
if isinstance(self.data, Data2D):
backend_accuracy = ACCURACY_DICT.get(accuracy)
if backend_accuracy:
self.current_smearer.set_accuracy(accuracy=backend_accuracy)
else:
self.current_smearer.set_accuracy(accuracy='low')
def onPinholeSmear(self):
"""
Create a custom pinhole smear object that will change the way residuals
are computed when fitting
"""
_, accuracy, _, d_percent = self.state()
self.pinhole = d_percent
if d_percent is None or d_percent == 0.0:
self.current_smearer = None
return
percent = d_percent/100.0
# copy data
data = copy.deepcopy(self.data)
if isinstance(self.data, Data2D):
len_data = len(data.data)
data.dqx_data = np.zeros(len_data)
data.dqy_data = np.zeros(len_data)
q = np.sqrt(data.qx_data**2 + data.qy_data**2)
data.dqx_data = data.dqy_data = percent*q
else:
len_data = len(data.x)
data.dx = np.zeros(len_data)
data.dx = percent * data.x
data.dxl = None
data.dxw = None
self.current_smearer = smear_selection(data, self.kernel_model)
# need to set accuracy for 2D
if isinstance(self.data, Data2D):
backend_accuracy = ACCURACY_DICT.get(accuracy)
if backend_accuracy:
self.current_smearer.set_accuracy(accuracy=backend_accuracy)
else:
self.current_smearer.set_accuracy(accuracy='low')
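# Illustrative sketch (not part of the original module): the pinhole dQ set
# above. The user enters dQ/Q as a percentage and every point receives a
# resolution proportional to its q value.
def _example_pinhole_dq():
    import numpy as np
    q = np.array([0.01, 0.05, 0.10])   # hypothetical 1D q values
    d_percent = 5.0                    # user-entered dQ/Q in percent
    return (d_percent / 100.0) * q     # [0.0005, 0.0025, 0.005]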
def onSlitSmear(self):
"""
Create a custom slit smear object that will change the way residuals
are computed when fitting
"""
_, accuracy, d_width, d_height = self.state()
# Check changes in slit width
if d_width is None:
d_width = 0.0
if d_height is None:
d_height = 0.0
self.slit_width = d_width
self.slit_height = d_height
if isinstance(self.data, Data2D):
self.current_smearer = smear_selection(self.data, self.kernel_model)
return
# rebuild the smearer from a copy of the data carrying explicit slit values
data = copy.deepcopy(self.data)
data_len = len(data.x)
data.dx = None
data.dxl = None
data.dxw = None
try:
    self.dxl = d_height
    data.dxl = self.dxl * np.ones(data_len)
except (TypeError, ValueError):
    self.dxl = None
    data.dxl = np.zeros(data_len)
try:
    self.dxw = d_width
    data.dxw = self.dxw * np.ones(data_len)
except (TypeError, ValueError):
    self.dxw = None
    data.dxw = np.zeros(data_len)
self.current_smearer = smear_selection(data, self.kernel_model)
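# Illustrative sketch (not part of the original module): onSlitSmear expands
# the scalar slit height/width into constant per-point dxl/dxw arrays.
def _example_slit_arrays():
    import numpy as np
    n = 4                               # hypothetical number of points
    d_height, d_width = 0.117, 0.0      # slit dimensions in 1/Angstrom
    dxl = d_height * np.ones(n)         # slit length (height) per point
    dxw = d_width * np.ones(n)          # slit width per point
    return dxl, dxw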
def setSmearInfo(self):
"""
Set default smear_type, dq_l, and dq_r based on the q-resolution information found in the data.
"""
# default
self.smear_type = None
self.dq_l = None
self.dq_r = None
data = self.data
if self.data is None:
return
# First check if data is 2D - If so check that data set has smearing info.
elif isinstance(data, Data2D):
if isinstance(self.smearer(), PySmear2D):
self.smear_type = "Pinhole2d"
self.dq_l = GuiUtils.formatNumber(np.average(data.dqx_data/np.abs(data.qx_data))*100., high=True)
self.dq_r = GuiUtils.formatNumber(np.average(data.dqy_data/np.abs(data.qy_data))*100., high=True)
# Check for pinhole smearing and, if present, report min/max dQ/Q.
elif (isinstance(self.smearer(), PySmear)
and isinstance(self.smearer().resolution, (Pinhole1D, SesansTransform))):
self.smear_type = "Pinhole"
self.dq_r = GuiUtils.formatNumber(data.dx[0]/data.x[0] *100., high=True)
self.dq_l = GuiUtils.formatNumber(data.dx[-1]/data.x[-1] *100., high=True)
# Check for slit smearing and, if present, report the slit dimensions.
elif isinstance(self.smearer(), PySmear) and isinstance(self.smearer().resolution, Slit1D):
    self.smear_type = "Slit"
    if data.dxl is not None and np.all(data.dxl):
        self.dq_l = GuiUtils.formatNumber(data.dxl[0])
    if data.dxw is not None and np.all(data.dxw):
        self.dq_r = GuiUtils.formatNumber(data.dxw[0])
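# Illustrative sketch (not part of the original module): in the pinhole case
# above, dQ/Q is reported as a percentage at the first and last q points.
def _example_dq_over_q_percent():
    import numpy as np
    x = np.array([0.01, 0.05, 0.10])         # hypothetical q values
    dx = np.array([0.0005, 0.0020, 0.0030])  # hypothetical resolutions
    dq_r = dx[0] / x[0] * 100.0     # 5.0 -> value at the lowest q
    dq_l = dx[-1] / x[-1] * 100.0   # 3.0 -> value at the highest q
    return dq_l, dq_r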
def resetSmearer(self):
self.current_smearer = None
self.cbSmearing.blockSignals(True)
self.cbSmearing.clear()
self.cbSmearing.blockSignals(False)
|
/sasview-5.0.6b3-py3-none-any.whl/sas/qtgui/Perspectives/Fitting/SmearingWidget.py
| 0.517327 | 0.262901 |
SmearingWidget.py
|
pypi
|