Dataset columns: desc (string lengths 3 to 26.7k), decl (string lengths 11 to 7.89k), bodies (string lengths 8 to 553k).
'Open the file at ``self._file_path``'
def _open_file(self):
if self._file is None:
    self._file = open(self._file_path, 'rb+')
    self._pos = self._file.tell()
    assert self._pos == 0
'Read an event from the binlog file'
def _read_event(self):
headerlength = 19
header = self._file.read(headerlength)
event_pos = self._pos
self._pos += len(header)
if len(header) == 0:
    return None
event = SimpleBinLogEvent(header)
event.set_pos(event_pos)
if event.event_size < headerlength:
    messagefmt = 'Event size {0} is too small'
    message = messagefmt.format(event.event_size)
    raise EventSizeTooSmallError(message)
else:
    body = self._file.read(event.event_size - headerlength)
    self._pos += len(body)
    event.set_body(body)
return event
'Read the first four *magic* bytes of the binlog file'
def _read_magic(self):
self._open_file()
if self._pos == 0:
    magic = self._file.read(4)
    if magic == self._expected_magic:
        self._pos += len(magic)
    else:
        messagefmt = 'Magic bytes {0!r} did not match expected {1!r}'
        message = messagefmt.format(magic, self._expected_magic)
        raise BadMagicBytesError(message)
'Initialize the Event with the event header'
def __init__(self, header):
unpacked = struct.unpack('<IcIIIH', header)
self.timestamp = unpacked[0]
self.event_type = byte2int(unpacked[1])
self.server_id = unpacked[2]
self.event_size = unpacked[3]
self.log_pos = unpacked[4]
self.flags = unpacked[5]
self.body = None
self.pos = None
'Save the body bytes'
def set_body(self, body):
self.body = body
'Save the event position'
def set_pos(self, pos):
self.pos = pos
'Attributes: value: string or tuple. If a string, it is used as the hostname; if a tuple, it is used as (hostname, user, password, port)'
def __init__(self, value):
if isinstance(value, (tuple, list)):
    try:
        self.hostname = value[0]
        self.username = value[1]
        self.password = value[2]
        self.port = int(value[3])
    except IndexError:
        pass
elif isinstance(value, dict):
    for key in ['hostname', 'username', 'password', 'port']:
        try:
            setattr(self, key, value[key])
        except KeyError:
            pass
else:
    self.hostname = value
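A small usage sketch of the three accepted shapes handled above (the concrete values are illustrative):

    ReportSlave('replica-host')                              # string: hostname only
    ReportSlave(('replica-host', 'repl', 'secret', 3306))    # tuple: (hostname, user, password, port)
    ReportSlave({'hostname': 'replica-host', 'port': 3306})  # dict: missing keys are simply skipped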
'server_id: the slave server-id. master_id: usually 0; appears as "master id" in SHOW SLAVE HOSTS on the master. Unknown what else it impacts.'
def encoded(self, server_id, master_id=0):
lhostname = len(self.hostname.encode())
lusername = len(self.username.encode())
lpassword = len(self.password.encode())
# 1 command byte + 4 server-id + length-prefixed hostname/username/password
# + 2 port + 4 replication rank + 4 master-id
packet_len = (1 + 4 +
              1 + lhostname +
              1 + lusername +
              1 + lpassword +
              2 + 4 + 4)
MAX_STRING_LEN = 257  # one length byte + up to 256 chars
return (struct.pack('<i', packet_len) +
        int2byte(COM_REGISTER_SLAVE) +
        struct.pack('<L', server_id) +
        struct.pack('<%dp' % min(MAX_STRING_LEN, lhostname + 1), self.hostname.encode()) +
        struct.pack('<%dp' % min(MAX_STRING_LEN, lusername + 1), self.username.encode()) +
        struct.pack('<%dp' % min(MAX_STRING_LEN, lpassword + 1), self.password.encode()) +
        struct.pack('<H', self.port) +
        struct.pack('<l', 0) +
        struct.pack('<l', master_id))
'Attributes:
    ctl_connection_settings: Connection settings for the cluster holding schema information
    resume_stream: Start from the given position, from the latest binlog event, or from the oldest available event
    blocking: Read on the stream is blocking
    only_events: Array of allowed events
    ignored_events: Array of ignored events
    log_file: Set replication start log file
    log_pos: Set replication start log pos (resume_stream should be true)
    auto_position: Use master_auto_position gtid to set position
    only_tables: An array of the tables you want to watch (only works in binlog_format ROW)
    ignored_tables: An array of the tables you want to skip
    only_schemas: An array of the schemas you want to watch
    ignored_schemas: An array of the schemas you want to skip
    freeze_schema: If true, do not support ALTER TABLE. It\'s faster.
    skip_to_timestamp: Ignore all events until reaching the specified timestamp.
    report_slave: Report slave in SHOW SLAVE HOSTS.
    slave_uuid: Report slave_uuid in SHOW SLAVE HOSTS.
    fail_on_table_metadata_unavailable: Should raise an exception if we can\'t get table information on row_events
    slave_heartbeat: (seconds) Should the master actively send heartbeats on the connection. This also reduces traffic in GTID replication on resumption (in case many events need to be skipped in the binlog). See MASTER_HEARTBEAT_PERIOD in the MySQL documentation for semantics'
def __init__(self, connection_settings, server_id, ctl_connection_settings=None, resume_stream=False, blocking=False, only_events=None, log_file=None, log_pos=None, filter_non_implemented_events=True, ignored_events=None, auto_position=None, only_tables=None, ignored_tables=None, only_schemas=None, ignored_schemas=None, freeze_schema=False, skip_to_timestamp=None, report_slave=None, slave_uuid=None, pymysql_wrapper=None, fail_on_table_metadata_unavailable=False, slave_heartbeat=None):
self.__connection_settings = connection_settings
self.__connection_settings.setdefault('charset', 'utf8')

self.__connected_stream = False
self.__connected_ctl = False
self.__resume_stream = resume_stream
self.__blocking = blocking
self._ctl_connection_settings = ctl_connection_settings
if ctl_connection_settings:
    self._ctl_connection_settings.setdefault('charset', 'utf8')

self.__only_tables = only_tables
self.__ignored_tables = ignored_tables
self.__only_schemas = only_schemas
self.__ignored_schemas = ignored_schemas
self.__freeze_schema = freeze_schema
self.__allowed_events = self._allowed_event_list(only_events, ignored_events, filter_non_implemented_events)
self.__fail_on_table_metadata_unavailable = fail_on_table_metadata_unavailable

# TableMapEvent and RotateEvent are always kept at packet level because
# they are needed to handle the other events
self.__allowed_events_in_packet = frozenset([TableMapEvent, RotateEvent]).union(self.__allowed_events)

self.__server_id = server_id
self.__use_checksum = False

# Store table metadata information
self.table_map = {}
self.log_pos = log_pos
self.log_file = log_file
self.auto_position = auto_position
self.skip_to_timestamp = skip_to_timestamp

if report_slave:
    self.report_slave = ReportSlave(report_slave)
self.slave_uuid = slave_uuid
self.slave_heartbeat = slave_heartbeat

if pymysql_wrapper:
    self.pymysql_wrapper = pymysql_wrapper
else:
    self.pymysql_wrapper = pymysql.connect
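A hedged construction sketch using only the parameters documented above; the connection dict is whatever the underlying pymysql.connect accepts, and the concrete values are illustrative:

    stream = BinLogStreamReader(
        connection_settings={'host': '127.0.0.1', 'port': 3306,
                             'user': 'repl', 'passwd': 'secret'},
        server_id=100,          # must differ from every other replica's server-id
        blocking=True,          # keep waiting for new events instead of stopping
        resume_stream=True,     # continue from log_file/log_pos
        only_schemas=['app'],   # illustrative filter; None means everything
    )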
'Return True if binlog-checksum = CRC32. Only for MySQL > 5.6'
def __checksum_enabled(self):
cur = self._stream_connection.cursor()
cur.execute("SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'")
result = cur.fetchone()
cur.close()
if result is None:
    return False
(var, value) = result[:2]
if value == 'NONE':
    return False
return True
'Core data dumped for the event'
def _dump(self):
pass
'GTID = source_id:transaction_id Eg: 3E11FA47-71CA-11E1-9E33-C80AA9429562:23 See: http://dev.mysql.com/doc/refman/5.6/en/replication-gtids-concepts.html'
@property
def gtid(self):
gtid = '%s%s%s%s-%s%s-%s%s-%s%s-%s%s%s%s%s%s' % tuple('{0:02x}'.format(ord(c)) for c in self.sid)
gtid += ':%d' % self.gno
return gtid
'We parse a human-generated string here. So our end value b is incremented to conform to the internal representation format.'
@staticmethod
def parse_interval(interval):
m = re.search('^([0-9]+)(?:-([0-9]+))?$', interval)
if not m:
    raise ValueError('GTID format is incorrect: %r' % (interval,))
a = int(m.group(1))
b = int(m.group(2) or a)
return (a, b + 1)
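Worked examples of the off-by-one convention described above: the human-readable form is inclusive, the internal form is half-open.

    Gtid.parse_interval('5')     # -> (5, 6): a single transaction
    Gtid.parse_interval('1-5')   # -> (1, 6): inclusive 1..5 becomes half-open [1, 6)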
'Use the internal representation format and add it to our intervals, merging if required.'
def __add_interval(self, itvl):
new = []
if itvl[0] > itvl[1]:
    raise Exception('Malformed interval %s' % (itvl,))
if any(overlap(x, itvl) for x in self.intervals):
    raise Exception('Overlapping interval %s' % (itvl,))
for existing in sorted(self.intervals):
    if itvl[0] == existing[1]:
        itvl = (existing[0], itvl[1])
        continue
    if itvl[1] == existing[0]:
        itvl = (itvl[0], existing[1])
        continue
    new.append(existing)
self.intervals = sorted(new + [itvl])
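A short trace of the merge logic above, using the internal half-open representation:

    # intervals == [(1, 6)]
    # __add_interval((6, 10))  -> touches the right edge of (1, 6), merged into (1, 10)
    # __add_interval((12, 15)) -> disjoint, intervals become [(1, 10), (12, 15)]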
'Using the internal representation, remove an interval'
def __sub_interval(self, itvl):
new = []
if itvl[0] > itvl[1]:
    raise Exception('Malformed interval %s' % (itvl,))
if not any(overlap(x, itvl) for x in self.intervals):
    return
for existing in sorted(self.intervals):
    if overlap(existing, itvl):
        if existing[0] < itvl[0]:
            new.append((existing[0], itvl[0]))
        if existing[1] > itvl[1]:
            new.append((itvl[1], existing[1]))
    else:
        new.append(existing)
self.intervals = new
'Include the transactions of this gtid. Raise if the attempted merge has different SID'
def __add__(self, other):
if self.sid != other.sid:
    raise Exception('Attempt to merge different SID %s != %s' % (self.sid, other.sid))
result = Gtid(str(self))
for itvl in other.intervals:
    result.__add_interval(itvl)
return result
'Remove intervals. Do not raise, if different SID simply ignore'
def __sub__(self, other):
result = Gtid(str(self))
if self.sid != other.sid:
    return result
for itvl in other.intervals:
    result.__sub_interval(itvl)
return result
'We represent the human value here - a single number for one transaction, or a closed interval (decrementing b)'
def __str__(self):
return '%s:%s' % (self.sid,
                  ':'.join(('%d-%d' % (x[0], x[1] - 1)) if x[0] + 1 != x[1] else str(x[0])
                           for x in self.intervals))
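Illustration of the rendering above, reusing the example SID from the gtid docstring: internal intervals [(1, 6), (8, 9)] print as

    # '3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5:8'
    # i.e. a closed range for multi-transaction intervals, a bare number for single ones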
'Push data back into the data buffer. It\'s used when you want to extract a bit from a value and let the rest of the code read the data normally'
def unread(self, data):
self.read_bytes -= len(data)
self.__data_buffer += data
'Read a \'Length Coded Binary\' number from the data buffer. Length coded numbers can be anywhere from 1 to 9 bytes depending on the value of the first byte. From PyMYSQL source code'
def read_length_coded_binary(self):
c = byte2int(self.read(1))
if c == NULL_COLUMN:
    return None
if c < UNSIGNED_CHAR_COLUMN:
    return c
elif c == UNSIGNED_SHORT_COLUMN:
    return self.unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
elif c == UNSIGNED_INT24_COLUMN:
    return self.unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
elif c == UNSIGNED_INT64_COLUMN:
    return self.unpack_int64(self.read(UNSIGNED_INT64_LENGTH))
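Worked examples of the first-byte dispatch above (the column constants follow the MySQL wire protocol as defined in PyMySQL: 251 = NULL, 252 = 2-byte, 253 = 3-byte, 254 = 8-byte):

    # b'\x0c'          -> 12    (values below 251 are encoded directly in one byte)
    # b'\xfc\xe8\x03'  -> 1000  (0xfc prefix, then a little-endian uint16)
    # b'\xfb'          -> None  (NULL column)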
'Read a \'Length Coded String\' from the data buffer. A \'Length Coded String\' consists first of a length coded (unsigned, positive) integer represented in 1-9 bytes followed by that many bytes of binary data. (For example "cat" would be "3cat".) From PyMYSQL source code'
def read_length_coded_string(self):
length = self.read_length_coded_binary()
if length is None:
    return None
return self.read(length).decode()
'Read a big-endian integer value of the given size in bytes'
def read_int_be_by_size(self, size):
if size == 1:
    return struct.unpack('>b', self.read(size))[0]
elif size == 2:
    return struct.unpack('>h', self.read(size))[0]
elif size == 3:
    return self.read_int24_be()
elif size == 4:
    return struct.unpack('>i', self.read(size))[0]
elif size == 5:
    return self.read_int40_be()
elif size == 8:
    # '>q' is the 8-byte signed big-endian format; '>l' would only unpack 4 bytes
    return struct.unpack('>q', self.read(size))[0]
'Read a little-endian unsigned integer value of the given size in bytes'
def read_uint_by_size(self, size):
if size == 1:
    return self.read_uint8()
elif size == 2:
    return self.read_uint16()
elif size == 3:
    return self.read_uint24()
elif size == 4:
    return self.read_uint32()
elif size == 5:
    return self.read_uint40()
elif size == 6:
    return self.read_uint48()
elif size == 7:
    return self.read_uint56()
elif size == 8:
    return self.read_uint64()
'Read a Pascal-style length-coded string: the string is preceded by its size'
def read_length_coded_pascal_string(self, size):
length = self.read_uint_by_size(size)
return self.read(length)
'Test different combinations of digest auth parameters'
def test_digest_auth(self):
username = 'user'
password = 'passwd'
for qop in (None, 'auth', 'auth-int'):
    for algorithm in (None, 'MD5', 'SHA-256'):
        for body in (None, '', 'request payload'):
            for stale_after in ((None, 1, 4) if algorithm else (None,)):
                self._test_digest_auth(username, password, qop, algorithm, body, stale_after)
'Test different combinations of digest auth parameters with a wrong password'
def test_digest_auth_wrong_pass(self):
username = 'user'
password = 'passwd'
for qop in (None, 'auth', 'auth-int'):
    for algorithm in (None, 'MD5', 'SHA-256'):
        for body in (None, '', 'request payload'):
            self._test_digest_auth_wrong_pass(username, password, qop, algorithm, body, 3)
'Create simple data set with headers.'
def setUp(self):
global data, book
data = tablib.Dataset()
book = tablib.Databook()
self.headers = ('first_name', 'last_name', 'gpa')
self.john = ('John', 'Adams', 90)
self.george = ('George', 'Washington', 67)
self.tom = ('Thomas', 'Jefferson', 50)
self.founders = tablib.Dataset(headers=self.headers, title='Founders')
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
'Teardown.'
def tearDown(self):
pass
'Verify append() correctly adds tuple with no headers.'
def test_empty_append(self):
new_row = (1, 2, 3)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
'Verify append() correctly detects mismatch of number of headers and data.'
def test_empty_append_with_headers(self):
data.headers = ['first', 'second']
new_row = (1, 2, 3, 4)
self.assertRaises(tablib.InvalidDimensions, data.append, new_row)
'Verify headers correctly detects mismatch of number of headers and data.'
def test_set_headers_with_incorrect_dimension(self):
data.append(self.john)

def set_header_callable():
    data.headers = ['first_name']

self.assertRaises(tablib.InvalidDimensions, set_header_callable)
'Verify adding column works with/without headers.'
def test_add_column(self):
data.append(['kenneth'])
data.append(['bessie'])
new_col = ['reitz', 'monke']
data.append_col(new_col)
self.assertEqual(data[0], ('kenneth', 'reitz'))
self.assertEqual(data.width, 2)
data.headers = ('fname', 'lname')
new_col = [21, 22]
data.append_col(new_col, header='age')
self.assertEqual(data['age'], new_col)
'Verify adding new column with no headers.'
def test_add_column_no_data_no_headers(self):
new_col = ('reitz', 'monke')
data.append_col(new_col)
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
'Verify append_col() ignores the header if data.headers has not previously been set'
def test_add_column_with_header_ignored(self):
new_col = ('reitz', 'monke')
data.append_col(new_col, header='first_name')
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
self.assertEqual(data.headers, None)
'Verify append_col() with header correctly detects mismatch when headers exist but there is no existing row data'
def test_add_column_with_header_and_headers_only_exist(self):
data.headers = ['first_name']
new_col = 'allen'

def append_col_callable():
    data.append_col(new_col, header='middle_name')

self.assertRaises(tablib.InvalidDimensions, append_col_callable)
'Verify append_col() works when headers and rows exists'
def test_add_column_with_header_and_data_exists(self):
data.headers = self.headers
data.append(self.john)
new_col = [10]
data.append_col(new_col, header='age')
self.assertEqual(data.height, 1)
self.assertEqual(data.width, len(self.john) + 1)
self.assertEqual(data['age'], new_col)
self.assertEqual(len(data.headers), len(self.headers) + 1)
'Verify adding column with values specified as callable.'
def test_add_callable_column(self):
new_col = lambda x: x[0]
self.founders.append_col(new_col, header='first_again')
'Verify slicing by headers.'
def test_header_slicing(self):
self.assertEqual(self.founders['first_name'], [self.john[0], self.george[0], self.tom[0]])
self.assertEqual(self.founders['last_name'], [self.john[1], self.george[1], self.tom[1]])
self.assertEqual(self.founders['gpa'], [self.john[2], self.george[2], self.tom[2]])
'Verify getting columns by index'
def test_get_col(self):
self.assertEqual(self.founders.get_col(list(self.headers).index('first_name')), [self.john[0], self.george[0], self.tom[0]])
self.assertEqual(self.founders.get_col(list(self.headers).index('last_name')), [self.john[1], self.george[1], self.tom[1]])
self.assertEqual(self.founders.get_col(list(self.headers).index('gpa')), [self.john[2], self.george[2], self.tom[2]])
'Verify slicing by data.'
def test_data_slicing(self):
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[:1], [self.john])
self.assertEqual(self.founders[1:2], [self.george])
self.assertEqual(self.founders[-1], self.tom)
self.assertEqual(self.founders[3:], [])
self.assertEqual(self.founders[:], [self.john, self.george, self.tom])
self.assertEqual(self.founders[0:2], [self.john, self.george])
self.assertEqual(self.founders[1:3], [self.george, self.tom])
self.assertEqual(self.founders[2:], [self.tom])
'Verify Row\'s __getslice__ method. Issue #184.'
def test_row_slicing(self):
john = Row(self.john)
self.assertEqual(john[:], list(self.john[:]))
self.assertEqual(john[0:], list(self.john[0:]))
self.assertEqual(john[:2], list(self.john[:2]))
self.assertEqual(john[0:2], list(self.john[0:2]))
self.assertEqual(john[0:-1], list(self.john[0:-1]))
'Verify deleting from dataset works.'
def test_delete(self):
del self.founders[0]
self.assertEqual(self.founders[:], [self.george, self.tom])
self.assertEqual(self.founders.height, 2)
self.assertEqual(self.founders.width, 3)
del self.founders[1]
self.assertEqual(self.founders[:], [self.george])
self.assertEqual(self.founders.height, 1)
self.assertEqual(self.founders.width, 3)
self.assertRaises(IndexError, self.founders.__delitem__, 3)
'Verify exporting dataset object as CSV.'
def test_csv_export(self):
# Build the expected CSV by hand: header row first, then one row per founder
csv = ''
for col in self.headers:
    csv += col + ','
csv = csv.strip(',') + '\r\n'
for founder in self.founders:
    for col in founder:
        csv += str(col) + ','
    csv = csv.strip(',') + '\r\n'
self.assertEqual(csv, self.founders.csv)
'Verify exporting dataset object as TSV.'
def test_tsv_export(self):
# Build the expected TSV by hand, using tab as the separator
tsv = ''
for col in self.headers:
    tsv += col + '\t'
tsv = tsv.strip('\t') + '\r\n'
for founder in self.founders:
    for col in founder:
        tsv += str(col) + '\t'
    tsv = tsv.strip('\t') + '\r\n'
self.assertEqual(tsv, self.founders.tsv)
'HTML export'
def test_html_export(self):
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(self.founders.headers))
html.thead.close()
for founder in self.founders:
    html.tr(markup.oneliner.td(founder))
html.table.close()
html = str(html)
self.assertEqual(html, self.founders.html)
'HTML export with a None value'
def test_html_export_none_value(self):
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(['foo', '', 'bar']))
html.thead.close()
html.tr(markup.oneliner.td(['foo', '', 'bar']))
html.table.close()
html = str(html)
headers = ['foo', None, 'bar']
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
self.assertEqual(html, d.html)
'LaTeX export'
def test_latex_export(self):
expected = '% Note: add \\usepackage{booktabs} to your preamble\n%\n\\begin{table}[!htbp]\n \\centering\n \\caption{Founders}\n \\begin{tabular}{lrr}\n \\toprule\n first\\_name & last\\_name & gpa \\\\\n \\cmidrule(r){1-1} \\cmidrule(lr){2-2} \\cmidrule(l){3-3}\n John & Adams & 90 \\\\\n George & Washington & 67 \\\\\n Thomas & Jefferson & 50 \\\\\n \\bottomrule\n \\end{tabular}\n\\end{table}\n'
output = self.founders.latex
self.assertEqual(output, expected)
'Passes in a single unicode character and exports.'
def test_unicode_append(self):
if is_py3:
    new_row = ('å', 'é')
else:
    exec("new_row = (u'å', u'é')")
data.append(new_row)
data.json
data.yaml
data.csv
data.tsv
data.xls
data.xlsx
data.ods
data.html
data.latex
'Passes in a single datetime and a single date and exports.'
def test_datetime_append(self):
new_row = (datetime.datetime.now(), datetime.datetime.today())
data.append(new_row)
data.json
data.yaml
data.csv
data.tsv
data.xls
data.xlsx
data.ods
data.html
data.latex
'Test that various exports don\'t error out.'
def test_book_export_no_exceptions(self):
book = tablib.Databook()
book.add_sheet(data)
book.json
book.yaml
book.xls
book.xlsx
book.ods
book.html
'Generate and import JSON set serialization.'
def test_json_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_json = data.json
data.json = _json
self.assertEqual(json.loads(_json), json.loads(data.json))
'Generate and import JSON book serialization.'
def test_json_import_book(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_json = book.json
book.json = _json
self.assertEqual(json.loads(_json), json.loads(book.json))
'Generate and import YAML set serialization.'
def test_yaml_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_yaml = data.yaml
data.yaml = _yaml
self.assertEqual(_yaml, data.yaml)
'Generate and import YAML book serialization.'
def test_yaml_import_book(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_yaml = book.yaml
book.yaml = _yaml
self.assertEqual(_yaml, book.yaml)
'Generate and import CSV set serialization.'
def test_csv_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
'Test for proper output with semicolon separated CSV.'
def test_csv_import_set_semicolons(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
'Generate and import CSV set serialization when row values have spaces.'
def test_csv_import_set_with_spaces(self):
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
'Generate and import semicolon separated CSV set serialization when row values have spaces.'
def test_csv_import_set_semicolon_with_spaces(self):
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
'Generate and import CSV set serialization when row values have newlines.'
def test_csv_import_set_with_newlines(self):
data.append(('Markdown\n=======', 'A cool language\n\nwith paragraphs'))
data.append(('reStructedText\n==============', 'Another cool language\n\nwith paragraphs'))
data.headers = ('title', 'body')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
'Generate and import TSV set serialization.'
def test_tsv_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_tsv = data.tsv
data.tsv = _tsv
self.assertEqual(_tsv, data.tsv)
'Test DBF export.'
def test_dbf_export_set(self):
data.append(self.john)
data.append(self.george)
data.append(self.tom)
data.headers = self.headers
_regression_dbf = '\x03r\x06\x06\x03\x00\x00\x00\x81\x00\xab\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
_regression_dbf += (' John' + (' ' * 75))
_regression_dbf += (' Adams' + (' ' * 74))
_regression_dbf += ' 90.0000000'
_regression_dbf += (' George' + (' ' * 73))
_regression_dbf += (' Washington' + (' ' * 69))
_regression_dbf += ' 67.0000000'
_regression_dbf += (' Thomas' + (' ' * 73))
_regression_dbf += (' Jefferson' + (' ' * 70))
_regression_dbf += ' 50.0000000'
_regression_dbf += '\x1a'
if is_py3:
    pass
try:
    self.assertEqual(_regression_dbf, data.dbf)
except AssertionError:
    # Compare byte by byte, skipping positions 1-3 (the DBF header's
    # last-update date), which legitimately varies between runs.
    index = 0
    found_so_far = ''
    for (reg_char, data_char) in zip(_regression_dbf, data.dbf):
        if (reg_char != data_char) and (index not in [1, 2, 3]):
            raise AssertionError('Failing at char %s: %s vs %s (found %s)' % (index, reg_char, data_char, found_so_far))
        index += 1
'Test the DBF format detection.'
def test_dbf_format_detect(self):
_dbf = '\x03r\x06\x03\x03\x00\x00\x00\x81\x00\xab\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
_dbf += (' John' + (' ' * 75))
_dbf += (' Adams' + (' ' * 74))
_dbf += ' 90.0000000'
_dbf += (' George' + (' ' * 73))
_dbf += (' Washington' + (' ' * 69))
_dbf += ' 67.0000000'
_dbf += (' Thomas' + (' ' * 73))
_dbf += (' Jefferson' + (' ' * 70))
_dbf += ' 50.0000000'
_dbf += '\x1a'
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertTrue(tablib.formats.dbf.detect(_dbf))
self.assertFalse(tablib.formats.dbf.detect(_yaml))
self.assertFalse(tablib.formats.dbf.detect(_tsv))
self.assertFalse(tablib.formats.dbf.detect(_csv))
self.assertFalse(tablib.formats.dbf.detect(_json))
self.assertFalse(tablib.formats.dbf.detect(_bunk))
'Test CSV format detection.'
def test_csv_format_detect(self):
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertTrue(tablib.formats.csv.detect(_csv))
self.assertFalse(tablib.formats.csv.detect(_bunk))
'Test TSV format detection.'
def test_tsv_format_detect(self):
_tsv = '1\t2\t3\n4\t5\t6\n7\t8\t9\n'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertTrue(tablib.formats.tsv.detect(_tsv))
self.assertFalse(tablib.formats.tsv.detect(_bunk))
'Test JSON format detection.'
def test_json_format_detect(self):
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertTrue(tablib.formats.json.detect(_json))
self.assertFalse(tablib.formats.json.detect(_bunk))
'Test YAML format detection.'
def test_yaml_format_detect(self):
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1---///\n\n\n\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertTrue(tablib.formats.yaml.detect(_yaml))
self.assertFalse(tablib.formats.yaml.detect(_bunk))
self.assertFalse(tablib.formats.yaml.detect(_tsv))
'Test auto format detection.'
def test_auto_format_detect(self):
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_tsv = '1\t2\t3\n4\t5\t6\n7\t8\t9\n'
_bunk = '\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1\xc2\xa1---///\n\n\n\xc2\xa1\xc2\xa1\xc2\xa3\xe2\x84\xa2\xe2\x88\x9e\xc2\xa2\xc2\xa3\xc2\xa7\xe2\x88\x9e\xc2\xa7\xc2\xb6\xe2\x80\xa2\xc2\xb6\xc2\xaa\xe2\x88\x9e\xc2\xb6\xe2\x80\xa2\xc2\xaa\xc2\xba\xe2\x80\xa2\xe2\x80\xa2\xc2\xaa\xe2\x80\x93\xc2\xba\xc2\xa7\xe2\x80\xa2\xe2\x80\xa0\xe2\x80\xa2\xc2\xa7\xc2\xba\xc2\xb6\xe2\x80\xa2\xe2\x80\xa0\xc2\xa5\xc2\xaa\xe2\x80\x93\xc2\xba\xe2\x80\xa2\xc2\xa7\xc6\x92\xc3\xb8\xc2\xa5\xc2\xa8\xc2\xa9\xcf\x80\xc6\x92\xc3\xb8\xe2\x80\xa0\xcb\x86\xc2\xa5\xc3\xa7\xc2\xa9\xc2\xa8\xe2\x88\x9a\xc3\xb8\xcb\x86\xc2\xa5\xe2\x89\x88\xe2\x80\xa0\xc6\x92\xc2\xa5\xc3\xa7\xc2\xa9\xc3\xb8\xc2\xa8\xc3\xa7\xcb\x86\xc2\xa5\xc6\x92\xc3\xa7\xc3\xb8\xc2\xb6'
self.assertEqual(tablib.detect_format(_yaml), 'yaml')
self.assertEqual(tablib.detect_format(_csv), 'csv')
self.assertEqual(tablib.detect_format(_tsv), 'tsv')
self.assertEqual(tablib.detect_format(_json), 'json')
self.assertEqual(tablib.detect_format(_bunk), None)
'Transpose a dataset.'
def test_transpose(self):
transposed_founders = self.founders.transpose()
first_row = transposed_founders[0]
second_row = transposed_founders[1]
self.assertEqual(transposed_founders.headers, ['first_name', 'John', 'George', 'Thomas'])
self.assertEqual(first_row, ('last_name', 'Adams', 'Washington', 'Jefferson'))
self.assertEqual(second_row, ('gpa', 90, 67, 50))
'Row stacking.'
def test_row_stacking(self):
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
    to_join.append(row=row)
row_stacked = self.founders.stack(to_join)
for column in row_stacked.headers:
    original_data = self.founders[column]
    expected_data = original_data + original_data
    self.assertEqual(row_stacked[column], expected_data)
'Column stacking'
def test_column_stacking(self):
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
    to_join.append(row=row)
column_stacked = self.founders.stack_cols(to_join)
for (index, row) in enumerate(column_stacked):
    original_data = self.founders[index]
    expected_data = original_data + original_data
    self.assertEqual(row, expected_data)
self.assertEqual(column_stacked[0], ('John', 'Adams', 90, 'John', 'Adams', 90))
'Sort columns.'
def test_sorting(self):
sorted_data = self.founders.sort(col='first_name')
self.assertEqual(sorted_data.title, 'Founders')
first_row = sorted_data[0]
second_row = sorted_data[2]
third_row = sorted_data[1]
expected_first = self.founders[1]
expected_second = self.founders[2]
expected_third = self.founders[0]
self.assertEqual(first_row, expected_first)
self.assertEqual(second_row, expected_second)
self.assertEqual(third_row, expected_third)
'Unique Rows.'
def test_remove_duplicates(self):
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
self.assertEqual(self.founders[0], self.founders[3])
self.assertEqual(self.founders[1], self.founders[4])
self.assertEqual(self.founders[2], self.founders[5])
self.assertEqual(self.founders.height, 6)
self.founders.remove_duplicates()
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[1], self.george)
self.assertEqual(self.founders[2], self.tom)
self.assertEqual(self.founders.height, 3)
'Purge a dataset.'
def test_wipe(self):
new_row = (1, 2, 3)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
data.wipe()
new_row = (1, 2, 3, 4)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
'Create a subset of a dataset'
def test_subset(self):
rows = (0, 2)
columns = ('first_name', 'gpa')
data.headers = self.headers
data.append(self.john)
data.append(self.george)
data.append(self.tom)
subset = data.subset(rows=rows, cols=columns)
self.assertEqual(type(subset), tablib.Dataset)
self.assertEqual(subset.headers, list(columns))
self.assertEqual(subset._data[0].list, ['John', 90])
self.assertEqual(subset._data[1].list, ['Thomas', 50])
'Confirm formatters are being triggered.'
def test_formatters(self):
def _formatter(cell_value):
    return str(cell_value).upper()

self.founders.add_formatter('last_name', _formatter)
for name in [r['last_name'] for r in self.founders.dict]:
    self.assertTrue(name.isupper())
'Check if unicode in csv export doesn\'t raise.'
def test_unicode_csv(self):
data = tablib.Dataset()
if sys.version_info[0] > 2:
    data.append(['\xfc', '\xfd'])
else:
    exec("data.append([u'\xfc', u'\xfd'])")
data.csv
'Build up a CSV and test selecting a column'
def test_csv_column_select(self):
data = tablib.Dataset()
data.csv = self.founders.csv
headers = data.headers
self.assertTrue(isinstance(headers[0], unicode))
orig_first_name = self.founders[self.headers[0]]
csv_first_name = data[headers[0]]
self.assertEqual(orig_first_name, csv_first_name)
'Build up a CSV and test deleting a column'
def test_csv_column_delete(self):
data = tablib.Dataset()
data.csv = self.founders.csv
target_header = data.headers[0]
self.assertTrue(isinstance(target_header, unicode))
del data[target_header]
self.assertTrue(target_header not in data.headers)
'Build up a CSV and test sorting a column by name'
def test_csv_column_sort(self):
data = tablib.Dataset()
data.csv = self.founders.csv
orig_target_header = self.founders.headers[1]
target_header = data.headers[1]
self.founders.sort(orig_target_header)
data.sort(target_header)
self.assertEqual(self.founders[orig_target_header], data[target_header])
'Test CSV import and export with formatter configuration.'
def test_csv_formatter_support_kwargs(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
expected = 'first_name;last_name;gpa\nJohn;Adams;90\nGeorge;Washington;67\n'
kwargs = dict(delimiter=';', lineterminator='\n')
_csv = data.export('csv', **kwargs)
self.assertEqual(expected, _csv)
d1 = tablib.import_set(_csv, format='csv')
self.assertEqual(1, len(d1.headers))
d2 = tablib.import_set(_csv, format='csv', **kwargs)
self.assertEqual(3, len(d2.headers))
'Test XLSX export with formatter configuration.'
def test_databook_formatter_support_kwargs(self):
self.founders.export('xlsx', freeze_panes=False)
'Test XLSX export with new line in content.'
def test_databook_formatter_with_new_lines(self):
self.founders.append(('First\nSecond', 'Name', 42))
self.founders.export('xlsx')
'Tuple representation of :class:`Row`.'
@property
def tuple(self):
return tuple(self._row)
'List representation of :class:`Row`.'
@property
def list(self):
return list(self._row)
'Returns true if current row contains tag.'
def has_tag(self, tag):
if tag is None:
    return False
elif isinstance(tag, str):
    return tag in self.tags
else:
    return bool(len(set(tag) & set(self.tags)))
'Adds format properties.'
@classmethod
def _register_formats(cls):
for fmt in formats.available:
    try:
        try:
            # Formats that support both import and export
            setattr(cls, fmt.title, property(fmt.export_set, fmt.import_set))
            setattr(cls, 'get_%s' % fmt.title, fmt.export_set)
            setattr(cls, 'set_%s' % fmt.title, fmt.import_set)
            cls._formats[fmt.title] = (fmt.export_set, fmt.import_set)
        except AttributeError:
            # Export-only formats
            setattr(cls, fmt.title, property(fmt.export_set))
            setattr(cls, 'get_%s' % fmt.title, fmt.export_set)
            cls._formats[fmt.title] = (fmt.export_set, None)
    except AttributeError:
        cls._formats[fmt.title] = (None, None)
'Ensures the size of every row in the dataset is of proper proportions.'
def _validate(self, row=None, col=None, safety=False):
if row:
    is_valid = (len(row) == self.width) if self.width else True
elif col:
    if len(col) < 1:
        is_valid = True
    else:
        is_valid = (len(col) == self.height) if self.height else True
else:
    is_valid = all(len(x) == self.width for x in self._data)
if is_valid:
    return True
else:
    if not safety:
        raise InvalidDimensions
    return False
'Packages Dataset into lists of dictionaries for transmission.'
def _package(self, dicts=True, ordered=True):
_data = list(self._data)
if ordered:
    dict_pack = OrderedDict
else:
    dict_pack = dict

# Apply any registered formatters before packaging
if self._formatters:
    for (row_i, row) in enumerate(_data):
        for (col, callback) in self._formatters:
            try:
                if col is None:
                    for (j, c) in enumerate(row):
                        _data[row_i][j] = callback(c)
                else:
                    _data[row_i][col] = callback(row[col])
            except IndexError:
                raise InvalidDatasetIndex

if self.headers:
    if dicts:
        data = [dict_pack(list(zip(self.headers, data_row))) for data_row in _data]
    else:
        data = [list(self.headers)] + list(_data)
else:
    data = [list(row) for row in _data]
return data
'An *optional* list of strings to be used for header rows and attribute names. This must be set manually. The given list length must equal :class:`Dataset.width`.'
def _get_headers(self):
return self.__headers
'Validating headers setter.'
def _set_headers(self, collection):
self._validate(collection)
if collection:
    try:
        self.__headers = list(collection)
    except TypeError:
        raise TypeError
else:
    self.__headers = None
'A native Python representation of the :class:`Dataset` object. If headers have been set, a list of Python dictionaries will be returned. If no headers have been set, a list of tuples (rows) will be returned instead. A dataset object can also be imported by setting the `Dataset.dict` attribute: :: data = tablib.Dataset() data.dict = [{\'age\': 90, \'first_name\': \'Kenneth\', \'last_name\': \'Reitz\'}]'
def _get_dict(self):
return self._package()
'A native Python representation of the Dataset object. If headers have been set, a list of Python dictionaries will be returned. If no headers have been set, a list of tuples (rows) will be returned instead. A dataset object can also be imported by setting the :class:`Dataset.dict` attribute. :: data = tablib.Dataset() data.dict = [{\'age\': 90, \'first_name\': \'Kenneth\', \'last_name\': \'Reitz\'}]'
def _set_dict(self, pickle):
if not len(pickle):
    return
# list of rows
if isinstance(pickle[0], list):
    self.wipe()
    for row in pickle:
        self.append(Row(row))
# list of dicts
elif isinstance(pickle[0], dict):
    self.wipe()
    self.headers = list(pickle[0].keys())
    for row in pickle:
        self.append(Row(list(row.values())))
else:
    raise UnsupportedFormat
'Prepares the given column for insert/append.'
def _clean_col(self, col):
col = list(col)
if self.headers:
    header = [col.pop(0)]
else:
    header = []
# A single callable generates the column from the existing rows
if len(col) == 1 and hasattr(col[0], '__call__'):
    col = list(map(col[0], self._data))
col = tuple(header + col)
return col
'The number of rows currently in the :class:`Dataset`. Cannot be directly modified.'
@property
def height(self):
return len(self._data)
'The number of columns currently in the :class:`Dataset`. Cannot be directly modified.'
@property
def width(self):
try:
    return len(self._data[0])
except IndexError:
    try:
        return len(self.headers)
    except TypeError:
        return 0
'Import `in_stream` to the :class:`Dataset` object using the `format`. :param \*\*kwargs: (optional) custom configuration to the format `import_set`.'
def load(self, in_stream, format=None, **kwargs):
if not format:
    format = detect_format(in_stream)
(export_set, import_set) = self._formats.get(format, (None, None))
if not import_set:
    raise UnsupportedFormat('Format {0} cannot be imported.'.format(format))
import_set(self, in_stream, **kwargs)
return self
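A small usage sketch for the loader above; the CSV literal is illustrative, and the format argument can be omitted to fall back to detect_format:

    data = tablib.Dataset()
    data.load('first_name,gpa\r\nJohn,90\r\n', format='csv')
    # or, letting the format be auto-detected:
    data.load('first_name,gpa\r\nJohn,90\r\n')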
'Export :class:`Dataset` object to `format`. :param \*\*kwargs: (optional) custom configuration to the format `export_set`.'
def export(self, format, **kwargs):
(export_set, import_set) = self._formats.get(format, (None, None))
if not export_set:
    raise UnsupportedFormat('Format {0} cannot be exported.'.format(format))
return export_set(self, **kwargs)
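And the matching export path; as in test_csv_formatter_support_kwargs above, keyword arguments are passed through to the format's export_set:

    csv_text = data.export('csv', delimiter=';', lineterminator='\n')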
'A Legacy Excel Spreadsheet representation of the :class:`Dataset` object, with :ref:`separators`. Cannot be set. .. note:: XLS files are limited to a maximum of 65,000 rows. Use :class:`Dataset.xlsx` to avoid this limitation. .. admonition:: Binary Warning :class:`Dataset.xls` contains binary data, so make sure to write in binary mode:: with open(\'output.xls\', \'wb\') as f: f.write(data.xls)'
@property
def xls():
pass
'An Excel \'07+ Spreadsheet representation of the :class:`Dataset` object, with :ref:`separators`. Cannot be set. .. admonition:: Binary Warning :class:`Dataset.xlsx` contains binary data, so make sure to write in binary mode:: with open(\'output.xlsx\', \'wb\') as f: f.write(data.xlsx)'
@property
def xlsx():
pass
'An OpenDocument Spreadsheet representation of the :class:`Dataset` object, with :ref:`separators`. Cannot be set. .. admonition:: Binary Warning :class:`Dataset.ods` contains binary data, so make sure to write in binary mode:: with open(\'output.ods\', \'wb\') as f: f.write(data.ods)'
@property
def ods():
pass