'Fetch a tile and check that a key with the configured prefix exists in memcached'
def test_memcache_keyprefix(self):
    config_file_content = '''
    {
      "layers": {
        "memcache_osm": {
          "provider": {
            "name": "proxy",
            "url": "http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
          }
        }
      },
      "cache": {
        "name": "Memcache",
        "servers": ["127.0.0.1:11211"],
        "revision": 1,
        "key prefix": "cool_prefix"
      }
    }
    '''
    tile_mimetype, tile_content = utils.request(config_file_content, 'memcache_osm', 'png', 0, 0, 0)
    self.assertEqual(tile_mimetype, 'image/png')
    memcache_content = b64decode(self.mc.get('cool_prefix/1/memcache_osm/0/0/0.PNG').encode('ascii'))
    self.assertEqual(memcache_content, tile_content,
                     'Contents of memcached and value returned from TileStache should match')
    self.assertIsNone(self.mc.get('/1/memcache_osm/0/0/0.PNG'), 'Memcache value should be empty')
'Create 3 points (2 in the western hemisphere, 1 in the eastern) and retrieve as geojson. 2 points should be returned for the western hemisphere and 1 for the eastern at zoom level 1 (clip on)'
def test_points_geojson(self):
    self.defineGeometry('POINT')
    point_sf = Point(-122.42, 37.78)
    point_berlin = Point(13.41, 52.52)
    point_lima = Point(-77.03, 12.04)
    self.insertTestRow(point_sf.wkt, 'San Francisco')
    self.insertTestRow(point_berlin.wkt, 'Berlin')
    self.insertTestRow(point_lima.wkt, 'Lima')
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'json', 0, 0, 1)
    geojson_result = json.loads(tile_content.decode('utf8'))
    self.assertTrue(tile_mimetype.endswith('/json'))
    self.assertEqual(geojson_result['type'], 'FeatureCollection')
    self.assertEqual(len(geojson_result['features']), 2)
    cities = []
    for feature in geojson_result['features']:
        if feature['properties']['name'] == 'San Francisco':
            cities.append(feature['properties']['name'])
            self.assertTrue(point_sf.almost_equals(asShape(feature['geometry'])))
        elif feature['properties']['name'] == 'Lima':
            cities.append(feature['properties']['name'])
            self.assertTrue(point_lima.almost_equals(asShape(feature['geometry'])))
    self.assertTrue('San Francisco' in cities)
    self.assertTrue('Lima' in cities)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'json', 0, 1, 1)
    geojson_result = json.loads(tile_content.decode('utf8'))
    self.assertTrue(tile_mimetype.endswith('/json'))
    self.assertEqual(geojson_result['type'], 'FeatureCollection')
    self.assertEqual(len(geojson_result['features']), 1)
    self.assertTrue('Berlin' in geojson_result['features'][0]['properties']['name'])
'Create a line that goes from west to east (clip on)'
def test_linestring_geojson(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-180, 32), (180, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'json', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    geojson_result = json.loads(tile_content.decode('utf8'))
    west_hemisphere_geometry = asShape(geojson_result['features'][0]['geometry'])
    expected_geometry = LineString([(-180, 32), (180, 32)])
    self.assertTrue(expected_geometry.almost_equals(west_hemisphere_geometry))
'Create a polygon to cover the world and make sure it is "similar" (clip on)'
def test_polygon_geojson(self):
    self.defineGeometry('POLYGON')
    geom = Polygon([(-180, -85.05), (180, -85.05), (180, 85.05), (-180, 85.05), (-180, -85.05)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'json', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    geojson_result = json.loads(tile_content.decode('utf8'))
    result_geom = asShape(geojson_result['features'][0]['geometry'])
    expected_geom = Polygon([(-180, -85.05), (180, -85.05), (180, 85.05), (-180, 85.05), (-180, -85.05)])
    self.assertTrue(result_geom.difference(expected_geom.buffer(0.001)).is_empty)
    self.assertTrue(expected_geom.difference(result_geom.buffer(0.001)).is_empty)
'Create a line that goes from west to east (clip on), and test it in MultiProvider'
def test_linestring_multi_geojson(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-180, 32), (180, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_multi', 'json', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    geojson_result = json.loads(tile_content.decode('utf8'))
    feature1, feature2 = geojson_result['vectile_test'], geojson_result['vectile_copy']
    self.assertEqual(feature1['type'], 'FeatureCollection')
    self.assertEqual(feature2['type'], 'FeatureCollection')
    self.assertEqual(feature1['features'][0]['type'], 'Feature')
    self.assertEqual(feature2['features'][0]['type'], 'Feature')
    self.assertEqual(feature1['features'][0]['geometry']['type'], 'LineString')
    self.assertEqual(feature2['features'][0]['geometry']['type'], 'LineString')
    self.assertEqual(feature1['features'][0]['id'], feature2['features'][0]['id'])
    self.assertTrue('clipped' not in feature1['features'][0])
    self.assertTrue(feature2['features'][0]['clipped'])
'Create 3 points (2 in the western hemisphere, 1 in the eastern) and retrieve as topojson. 2 points should be returned for the western hemisphere and 1 for the eastern at zoom level 1 (clip on)'
def test_points_topojson(self):
    self.defineGeometry('POINT')
    point_sf = Point(-122.4183, 37.775)
    point_berlin = Point(13.4127, 52.5233)
    point_lima = Point(-77.0283, 12.0433)
    self.insertTestRow(point_sf.wkt, 'San Francisco')
    self.insertTestRow(point_berlin.wkt, 'Berlin')
    self.insertTestRow(point_lima.wkt, 'Lima')
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'topojson', 0, 0, 1)
    topojson_result = json.loads(tile_content.decode('utf8'))
    self.assertTrue(tile_mimetype.endswith('/json'))
    print(topojson_result)
    self.assertEqual(topojson_result['type'], 'Topology')
    self.assertEqual(len(topojson_result['objects']['vectile']['geometries']), 2)
    cities = []
    topojson_xform = get_topo_transform(topojson_result)
    for feature in topojson_result['objects']['vectile']['geometries']:
        lon, lat = topojson_xform(feature['coordinates'])
        if feature['properties']['name'] == 'San Francisco':
            cities.append(feature['properties']['name'])
            self.assertTrue(hypot(point_sf.x - lon, point_sf.y - lat) < 1)
        elif feature['properties']['name'] == 'Lima':
            cities.append(feature['properties']['name'])
            print(feature['coordinates'])
            self.assertTrue(hypot(point_lima.x - lon, point_lima.y - lat) < 1)
    self.assertTrue('San Francisco' in cities)
    self.assertTrue('Lima' in cities)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'topojson', 0, 1, 1)
    topojson_result = json.loads(tile_content.decode('utf8'))
    self.assertTrue(tile_mimetype.endswith('/json'))
    self.assertEqual(topojson_result['type'], 'Topology')
    self.assertEqual(len(topojson_result['objects']['vectile']['geometries']), 1)
    self.assertTrue('Berlin' in topojson_result['objects']['vectile']['geometries'][0]['properties']['name'])
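The TopoJSON tests lean on two helpers, get_topo_transform and topojson_dediff, whose definitions are not part of this listing. A minimal sketch of what they plausibly do, following the TopoJSON spec's quantized delta encoding (an assumption, not the project's actual code):

def get_topo_transform(topology):
    # Return a function that applies the topology's 'transform'
    # (per-axis scale and translate) to a quantized point.
    scale = topology['transform']['scale']
    translate = topology['transform']['translate']

    def xform(point):
        x, y = point[0], point[1]
        return (x * scale[0] + translate[0], y * scale[1] + translate[1])

    return xform

def topojson_dediff(arc):
    # Undo delta encoding: every point after the first is stored
    # relative to the previous point.
    points = [tuple(arc[0])]
    for dx, dy in arc[1:]:
        points.append((points[-1][0] + dx, points[-1][1] + dy))
    return points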
'Create a line that goes from west to east (clip on)'
def test_linestring_topojson(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-180, 32), (180, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'topojson', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    topojson_result = json.loads(tile_content.decode('utf8'))
    topojson_xform = get_topo_transform(topojson_result)
    parts = [topojson_result['arcs'][arc] for arc in topojson_result['objects']['vectile']['geometries'][0]['arcs']]
    parts = [map(topojson_xform, topojson_dediff(part)) for part in parts]
    west_hemisphere_geometry = LineString(*parts)
    self.assertTrue(abs(west_hemisphere_geometry.coords[0][0] + 180) < 2)
    self.assertTrue(abs(west_hemisphere_geometry.coords[1][0] - 180) < 2)
    self.assertTrue(abs(west_hemisphere_geometry.coords[0][1] - 32) < 2)
    self.assertTrue(abs(west_hemisphere_geometry.coords[1][1] - 32) < 2)
'Create a polygon to cover the world and make sure it is "similar" (clip on)'
def test_polygon_topojson(self):
    self.defineGeometry('POLYGON')
    geom = Polygon([(-180, -85.0511), (180, -85.0511), (180, 85.0511), (-180, 85.0511), (-180, -85.0511)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'topojson', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    topojson_result = json.loads(tile_content.decode('utf8'))
    topojson_xform = get_topo_transform(topojson_result)
    parts = [topojson_result['arcs'][arc[0]] for arc in topojson_result['objects']['vectile']['geometries'][0]['arcs']]
    parts = [map(topojson_xform, topojson_dediff(part)) for part in parts]
    result_geom = Polygon(*parts)
    expected_geom = Polygon([(-180, -85.0511), (180, -85.0511), (180, 85.0511), (-180, 85.0511), (-180, -85.0511)])
    self.assertTrue(result_geom.difference(expected_geom.buffer(1)).is_empty)
    self.assertTrue(expected_geom.difference(result_geom.buffer(1)).is_empty)
'Create a line that goes from west to east (clip on), and test it in MultiProvider'
def test_linestring_multi_topojson(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-180, 32), (180, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_multi', 'topojson', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/json'))
    topojson_result = json.loads(tile_content.decode('utf8'))
    self.assertEqual(topojson_result['type'], 'Topology')
    self.assertEqual(topojson_result['objects']['vectile_test']['type'], 'GeometryCollection')
    self.assertEqual(topojson_result['objects']['vectile_copy']['type'], 'GeometryCollection')
    geom1 = topojson_result['objects']['vectile_test']['geometries'][0]
    geom2 = topojson_result['objects']['vectile_copy']['geometries'][0]
    self.assertEqual(geom1['type'], 'LineString')
    self.assertEqual(geom2['type'], 'LineString')
    self.assertEqual(geom1['id'], geom2['id'])
    self.assertTrue('clipped' not in geom1)
    self.assertTrue(geom2['clipped'])
'Create 3 points (2 in the western hemisphere, 1 in the eastern) and retrieve as pbf. 2 points should be returned for the western hemisphere and 1 for the eastern at zoom level 1 (clip on)'
def test_points_pbf(self):
    self.defineGeometry('POINT')
    point_sf = Point(-122.42, 37.78)
    point_berlin = Point(13.41, 52.52)
    point_lima = Point(-77.03, 12.04)
    self.insertTestRow(point_sf.wkt, 'San Francisco')
    self.insertTestRow(point_berlin.wkt, 'Berlin')
    self.insertTestRow(point_lima.wkt, 'Lima')
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'pbf', 0, 0, 1)
    pbf_result = mapbox_vector_tile.decode(tile_content)
    self.assertTrue(tile_mimetype.endswith('/x-protobuf'))
    self.assertIn('vectile_test', pbf_result)
    layer_result = pbf_result['vectile_test']
    self.assertEqual(len(layer_result['features']), 2)
    extent = tile_bounds_mercator(0, 0, 1)
    cities = []
    for feature in layer_result['features']:
        if feature['properties']['name'] == 'San Francisco':
            cities.append(feature['properties']['name'])
            self.assertTrue(point_sf.almost_equals(decoded_pbf_asshape(feature, extent), decimal=1))
        elif feature['properties']['name'] == 'Lima':
            cities.append(feature['properties']['name'])
            self.assertTrue(point_lima.almost_equals(decoded_pbf_asshape(feature, extent), decimal=1))
    self.assertTrue('San Francisco' in cities)
    self.assertTrue('Lima' in cities)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'pbf', 0, 1, 1)
    pbf_result = mapbox_vector_tile.decode(tile_content)
    self.assertTrue(tile_mimetype.endswith('/x-protobuf'))
    self.assertIn('vectile_test', pbf_result)
    layer_result = pbf_result['vectile_test']
    self.assertEqual(len(layer_result['features']), 1)
    self.assertTrue('Berlin' in layer_result['features'][0]['properties']['name'])
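tile_bounds_mercator and decoded_pbf_asshape are test helpers defined elsewhere; the former presumably returns the web-mercator bounding box of a tile. A hedged sketch of the standard computation (assumed names and return order):

from math import pi

def tile_bounds_mercator(x, y, z):
    # (minx, miny, maxx, maxy) of tile (x, y) at zoom z in EPSG:3857 meters.
    size = 2 * pi * 6378137      # full extent of the mercator square
    origin = size / 2            # distance from center to edge
    span = size / 2 ** z         # width of one tile at this zoom
    minx = x * span - origin
    maxx = (x + 1) * span - origin
    maxy = origin - y * span     # tile rows count down from the top
    miny = origin - (y + 1) * span
    return (minx, miny, maxx, maxy)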
'Create a line that goes from west to east (clip on) (pbf)'
def test_linestring_pbf(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-179, 32), (179, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'pbf', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/x-protobuf'))
    pbf_result = mapbox_vector_tile.decode(tile_content)
    layer_result = pbf_result['vectile_test']
    extent = tile_bounds_mercator(0, 0, 0)
    west_hemisphere_geometry = decoded_pbf_asshape(layer_result['features'][0], extent)
    expected_geometry = LineString([(-179, 32), (179, 32)])
    for returned, expected in zip(west_hemisphere_geometry.coords, expected_geometry.coords):
        self.assertEqual(round(returned[0]), expected[0])
        self.assertEqual(round(returned[1]), expected[1])
'Create a polygon to cover the world and make sure it is "similar" (clip on) (pbf)'
def test_polygon_pbf(self):
    self.defineGeometry('POLYGON')
    geom = Polygon([(-180, -85.05), (180, -85.05), (180, 85.05), (-180, 85.05), (-180, -85.05)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_test', 'pbf', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/x-protobuf'))
    pbf_result = mapbox_vector_tile.decode(tile_content)
    layer_result = pbf_result['vectile_test']
    extent = tile_bounds_mercator(0, 0, 0)
    result_geom = decoded_pbf_asshape(layer_result['features'][0], extent)
    expected_geom = Polygon([(-180, -85.05), (180, -85.05), (180, 85.05), (-180, 85.05), (-180, -85.05)])
    self.assertTrue(result_geom.difference(expected_geom.buffer(0.01)).is_empty)
    self.assertTrue(expected_geom.difference(result_geom.buffer(0.01)).is_empty)
'Create a line that goes from west to east (clip on), and test it in MultiProvider (pbf)'
def test_linestring_multi_pbf(self):
    self.defineGeometry('LINESTRING')
    geom = LineString([(-180, 32), (180, 32)])
    self.insertTestRow(geom.wkt)
    tile_mimetype, tile_content = utils.request(self.config_file_content, 'vectile_multi', 'pbf', 0, 0, 0)
    self.assertTrue(tile_mimetype.endswith('/x-protobuf'))
    pbf_result = mapbox_vector_tile.decode(tile_content)
    feature1, feature2 = pbf_result['vectile_test'], pbf_result['vectile_copy']
    self.assertEqual(feature1['features'][0]['type'], 2)
    self.assertEqual(feature2['features'][0]['type'], 2)
    self.assertEqual(feature1['features'][0]['id'], feature2['features'][0]['id'])
'Return tile URLs that start with file://, by first retrieving them.'
def getTileUrls(self, coord):
    if self.threadsafe or self.lock.acquire():
        mime_type, tile_data = TileStache.getTile(self.layer, coord, 'png', self.ignore_cached)
        handle, filename = mkstemp(prefix='tilestache-compose-', suffix='.png')
        write(handle, tile_data)
        close(handle)
        self.files.append(filename)
        if not self.threadsafe:
            self.lock.release()
        if self.verbose:
            size = len(tile_data) / 1024.0
            printlocked(self.lock,
                        self.layer.name() + '/%(zoom)d/%(column)d/%(row)d.png' % coord.__dict__,
                        '(%dKB)' % size)
        return ('file://' + abspath(filename),)
'Delete any tile that was saved in self.getTileUrls().'
def __del__(self):
    for filename in self.files:
        unlink(filename)
'If a form has empty_permitted, no fields should get the CSS class for required. For Django <= 1.8, also check the `required` attribute.'
def test_empty_permitted(self):
    if DBS3_SET_REQUIRED_SET_DISABLED:
        required_css_class = u'bootstrap3-req'
        form = TestForm()
        res = render_form_field(u'subject', {u'form': form})
        self.assertIn(required_css_class, res)
        form.empty_permitted = True
        res = render_form_field(u'subject', {u'form': form})
        self.assertNotIn(required_css_class, res)
    else:
        required_css_class = u'bootstrap3-req'
        form = TestForm()
        res = render_form_field(u'subject', {u'form': form})
        self.assertIn(required_css_class, res)
        form.empty_permitted = True
        res = render_form_field(u'subject', {u'form': form})
        self.assertNotIn(required_css_class, res)
'Asserts that a given field has both a before and an after addon.'
def assertFieldHasAddons(self, field):
    addon_before = u'bf'
    addon_after = u'af'
    res = render_template_with_form(
        u'{{% bootstrap_field form.{0} addon_before="{1}" addon_after="{2}" %}}'.format(
            field, addon_before, addon_after))
    self.assertIn(u'class="input-group"', res)
    self.assertIn(u'class="input-group-addon">{0}'.format(addon_before), res)
    self.assertIn(u'class="input-group-addon">{0}'.format(addon_after), res)
'Only relevant if DBS3_SET_REQUIRED_SET_DISABLED'
def add_required_attrs(self, widget=None):
    if widget is None:
        widget = self.widget
    if self.set_required and is_widget_required_attribute(widget):
        widget.attrs[u'required'] = u'required'
'Only relevant if DBS3_SET_REQUIRED_SET_DISABLED'
def add_disabled_attrs(self, widget=None):
    if widget is None:
        widget = self.widget
    if self.set_disabled:
        widget.attrs[u'disabled'] = u'disabled'
'Fix a clearable file input. TODO: This needs improvement. Currently Django returns: <a href="dummy.txt">dummy.txt</a> <input id="file4-clear_id" name="file4-clear" type="checkbox" /> <label for="file4-clear_id">Clear</label><br /> Change to: <input id="id_file4" name="file4" type="file" /> <span class=help-block></span> </div>'
def fix_clearable_file_input(self, html):
    return u'<div class="row bootstrap3-multi-input"><div class="col-xs-12">{html}</div></div>'.format(html=html)
'Test to ensure that row merging yields correct results'
def test_fix_rows(self):
    start = [['1', '2', '3'], [''], ['abc'], ['4', '5']]
    fixed = join_rows(start)
    self.assertEqual(4, len(fixed))
    self.assertEqual(start[0][0], fixed[0])
    self.assertEqual(start[0][1], fixed[1])
    self.assertEqual(' '.join([start[0][-1], start[1][0], start[2][0], start[3][0]]), fixed[2])
    self.assertEqual(start[3][1], fixed[3])
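join_rows itself is not shown in this listing. A sketch consistent with the assertions above, in which the last field of each row is glued to the first field of the next with the joiner (hypothetical; the real implementation lives elsewhere in csvkit):

def join_rows(rows, joiner=' '):
    # Merge rows that were split across lines into a single row.
    rows = list(rows)
    fixed_row = rows[0][:]
    for row in rows[1:]:
        if not row:
            row = ['']
        fixed_row[-1] += joiner + row[0]
        fixed_row.extend(row[1:])
    return fixed_row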
'Load test data into the DB and return it as a string for comparison.'
def csvsql(self, csv_file, db=None):
    if not db:
        db = 'sqlite:///' + self.db_file
    args = ['--db', db, '--tables', 'foo', '--insert', csv_file]
    utility = CSVSQL(args)
    utility.run()
    with open(csv_file, 'r') as f:
        text = f.read()
    return text.strip()
'Convenience method returns a dict. Equivalent to ``dict(zip(self.headers,self.parse(line)))``.'
def parse_dict(self, line):
    return dict(zip(self.headers, self.parse(line)))
'Constructs a schema row decoder.'
def __init__(self, header):
    for p, val_type in self.REQUIRED_COLUMNS:
        try:
            if val_type:
                setattr(self, p, val_type(header.index(p)))
            else:
                setattr(self, p, header.index(p))
        except ValueError:
            raise ValueError('A column named "%s" must exist in the schema file.' % p)
'Return a tuple (column, start, length) based on this instance\'s parameters. If the row\'s \'start\' value is 1 the first time this is called, then all \'start\' values (including the first) will be decremented by one relative to the actual input data, to adjust for one-based specifications. Values for \'start\' and \'length\' are cast to integers.'
def __call__(self, row):
    if self.one_based is None:
        self.one_based = (int(row[self.start]) == 1)
    if self.one_based:
        adjusted_start = int(row[self.start]) - 1
    else:
        adjusted_start = int(row[self.start])
    return FixedWidthField(row[self.column], adjusted_start, int(row[self.length]))
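The one-based inference is easy to miss: whether the whole schema is shifted is decided solely by the first row this decoder sees. A standalone illustration of that rule (hypothetical names, not project code):

one_based = None

def adjust(start):
    # Mirror of the logic above: latch one-basedness off the first value seen.
    global one_based
    if one_based is None:
        one_based = (int(start) == 1)
    return int(start) - 1 if one_based else int(start)

assert adjust('1') == 0    # first row starts at 1, so the schema is one-based
assert adjust('11') == 10  # every subsequent start shifts by one as well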
'Parse a list of join columns.'
def _parse_join_column_names(self, join_string):
    return list(map(str.strip, join_string.split(',')))
'Print data for a single statistic.'
def print_one(self, table, column_id, operation, label=True, **kwargs):
    column_name = table.column_names[column_id]
    op_name = operation
    getter = globals().get('get_%s' % op_name, None)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', agate.NullCalculationWarning)
        try:
            if getter:
                stat = getter(table, column_id, **kwargs)
            else:
                op = OPERATIONS[op_name]['aggregation']
                stat = table.aggregate(op(column_id))
                if isinstance(stat, Decimal):
                    stat = format_decimal(stat, locale=agate.config.get_option('default_locale'))
        except:
            stat = None

    if op_name == 'freq':
        stat = ', '.join([u'"%s": %s' % (six.text_type(row[column_name]), row['Count']) for row in stat])
        stat = u'{ %s }' % stat

    if label:
        self.output_file.write(u'%3i. %s: %s\n' % (column_id + 1, column_name, stat))
    else:
        self.output_file.write(u'%s\n' % stat)
'Calculate stats for all valid operations.'
def calculate_stats(self, table, column_id, **kwargs):
    stats = {}
    for op_name, op_data in OPERATIONS.items():
        getter = globals().get('get_%s' % op_name, None)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', agate.NullCalculationWarning)
            try:
                if getter:
                    stats[op_name] = getter(table, column_id, **kwargs)
                else:
                    op = op_data['aggregation']
                    v = table.aggregate(op(column_id))
                    if isinstance(v, Decimal):
                        v = format_decimal(v, locale=agate.config.get_option('default_locale'))
                    stats[op_name] = v
            except:
                stats[op_name] = None
    return stats
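Both methods above iterate over an OPERATIONS registry and fall back to a get_<op> function in globals() when no simple aggregation applies. A plausible shape for that registry, assumed for illustration (only the 'aggregation' and 'label' keys are actually read here):

import collections
import agate

OPERATIONS = collections.OrderedDict([
    ('min', {'aggregation': agate.Min, 'label': 'Smallest value'}),
    ('max', {'aggregation': agate.Max, 'label': 'Largest value'}),
    ('sum', {'aggregation': agate.Sum, 'label': 'Sum'}),
    # 'freq' has no one-shot aggregation, so print_one()/calculate_stats()
    # look up a get_freq() helper via globals().get('get_%s' % op_name).
    ('freq', {'aggregation': None, 'label': 'Most common values'}),
])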
'Print data for all statistics.'
def print_stats(self, table, column_ids, stats):
    label_column_width = max([len(op_data['label']) for op_data in OPERATIONS.values()])

    for column_id in column_ids:
        column_name = table.column_names[column_id]
        column = table.columns[column_id]
        column_stats = stats[column_id]

        self.output_file.write('%3i. "%s"\n\n' % (column_id + 1, column_name))

        for op_name, op_data in OPERATIONS.items():
            if column_stats[op_name] is None:
                continue

            label = u'{label:{label_column_width}}'.format(**{
                'label_column_width': label_column_width,
                'label': op_data['label'],
            })

            if op_name == 'freq':
                for i, row in enumerate(column_stats['freq']):
                    if i == 0:
                        self.output_file.write('\t{} '.format(label))
                    else:
                        self.output_file.write(u'\t{label:{label_column_width}} '.format(**{
                            'label_column_width': label_column_width,
                            'label': '',
                        }))
                    if isinstance(column.data_type, agate.Number):
                        v = row[column_name]
                        if isinstance(v, Decimal):
                            v = format_decimal(v, locale=agate.config.get_option('default_locale'))
                    else:
                        v = six.text_type(row[column_name])
                    self.output_file.write(u'{} ({}x)\n'.format(v, row['Count']))
                continue

            v = column_stats[op_name]
            if op_name == 'nulls' and v:
                v = '%s (excluded from calculations)' % v
            elif op_name == 'len':
                v = '%s characters' % v
            self.output_file.write(u'\t{} {}\n'.format(label, v))

        self.output_file.write('\n')

    self.output_file.write('Row count: %s\n' % len(table.rows))
'Print data for all statistics as a csv table.'
def print_csv(self, table, column_ids, stats):
    writer = agate.csv.writer(self.output_file)
    header = ['column_id', 'column_name'] + [op_name for op_name in OPERATIONS.keys()]
    writer.writerow(header)

    for column_id in column_ids:
        column_name = table.column_names[column_id]
        column_stats = stats[column_id]
        output_row = [column_id + 1, column_name]

        for op_name, op_data in OPERATIONS.items():
            if column_stats[op_name] is None:
                output_row.append(None)
                continue
            if op_name == 'freq':
                value = ', '.join([six.text_type(row[column_name]) for row in column_stats['freq']])
            else:
                value = column_stats[op_name]
            output_row.append(value)

        writer.writerow(output_row)
'Inner main function. If anything fails in here, file handles and database connections will be safely closed.'
def _failsafe_main(self):
    if self.connection:
        transaction = self.connection.begin()

    for f in self.input_files:
        try:
            table_name = self.table_names.pop(0)
        except IndexError:
            if f == sys.stdin:
                table_name = 'stdin'
            else:
                table_name = os.path.splitext(os.path.basename(f.name))[0]

        table = None
        try:
            table = agate.Table.from_csv(f, skip_lines=self.args.skip_lines,
                                         sniff_limit=self.args.sniff_limit,
                                         column_types=self.get_column_types(),
                                         **self.reader_kwargs)
        except StopIteration:
            continue

        if table:
            if self.connection:
                table.to_sql(self.connection, table_name,
                             overwrite=self.args.overwrite,
                             create=not self.args.no_create,
                             create_if_not_exists=self.args.create_if_not_exists,
                             insert=self.args.insert and len(table.rows) > 0,
                             prefixes=self.args.prefix,
                             db_schema=self.args.db_schema,
                             constraints=not self.args.no_constraints)
            else:
                statement = table.to_sql_create_statement(table_name,
                                                          dialect=self.args.dialect,
                                                          db_schema=self.args.db_schema,
                                                          constraints=not self.args.no_constraints)
                self.output_file.write('%s\n' % statement)

    if self.connection:
        if self.args.query:
            if os.path.exists(self.args.query):
                with open(self.args.query, 'r') as f:
                    query = f.read()
            else:
                query = self.args.query

            queries = query.split(';')
            rows = None
            for q in queries:
                if q.strip():
                    rows = self.connection.execute(q)

            output = agate.csv.writer(self.output_file, **self.writer_kwargs)
            output.writerow(rows._metadata.keys)
            for row in rows:
                output.writerow(row)

        transaction.commit()
'Perform argument processing and other setup for a CSVKitUtility.'
def __init__(self, args=None, output_file=None):
    self._init_common_parser()
    self.add_arguments()
    self.args = self.argparser.parse_args(args)

    if output_file is None:
        self.output_file = sys.stdout
    else:
        self.output_file = output_file

    self.reader_kwargs = self._extract_csv_reader_kwargs()
    self.writer_kwargs = self._extract_csv_writer_kwargs()

    self._install_exception_handler()

    try:
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except (ImportError, AttributeError):
        pass
'Called upon initialization once the parser for common arguments has been constructed. Should be overridden by individual utilities.'
def add_arguments(self):
    raise NotImplementedError('add_arguments must be provided by each subclass of CSVKitUtility.')
'A wrapper around the main loop of the utility which handles opening and closing files.'
def run(self):
    if 'f' not in self.override_flags:
        self.input_file = self._open_input_file(self.args.input_path)
    try:
        self.main()
    finally:
        if 'f' not in self.override_flags:
            self.input_file.close()
'Main loop of the utility. Should be overridden by individual utilities and explicitly called by the executing script.'
def main(self):
    raise NotImplementedError('main must be provided by each subclass of CSVKitUtility.')
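The two NotImplementedError stubs define a template-method contract: subclasses declare their flags in add_arguments() and do their work in main(). A hypothetical minimal subclass to make the contract concrete (not a real csvkit tool; assumes the CSVKitUtility shown here is importable):

import agate

class CSVHead(CSVKitUtility):
    description = 'Print the first N rows of a CSV file.'

    def add_arguments(self):
        self.argparser.add_argument('-n', '--lines', dest='lines', type=int, default=10,
                                    help='Number of rows to print.')

    def main(self):
        reader = agate.csv.reader(self.input_file, **self.reader_kwargs)
        writer = agate.csv.writer(self.output_file, **self.writer_kwargs)
        for i, row in enumerate(reader):
            if i > self.args.lines:  # header row plus N data rows
                break
            writer.writerow(row)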
'Prepare a base argparse argument parser so that flags are consistent across different shell command tools. If you want to constrain which common args are present, you can pass a string for \'override_flags\'. Any argument whose single-letter form is contained in \'override_flags\' will be left out of the configured parser. Use \'f\' for file.'
def _init_common_parser(self):
    self.argparser = argparse.ArgumentParser(description=self.description, epilog=self.epilog)

    if 'f' not in self.override_flags:
        self.argparser.add_argument(metavar='FILE', nargs='?', dest='input_path',
                                    help='The CSV file to operate on. If omitted, will accept input on STDIN.')
    if 'd' not in self.override_flags:
        self.argparser.add_argument('-d', '--delimiter', dest='delimiter',
                                    help='Delimiting character of the input CSV file.')
    if 't' not in self.override_flags:
        self.argparser.add_argument('-t', '--tabs', dest='tabs', action='store_true',
                                    help='Specify that the input CSV file is delimited with tabs. Overrides "-d".')
    if 'q' not in self.override_flags:
        self.argparser.add_argument('-q', '--quotechar', dest='quotechar',
                                    help='Character used to quote strings in the input CSV file.')
    if 'u' not in self.override_flags:
        self.argparser.add_argument('-u', '--quoting', dest='quoting', type=int, choices=[0, 1, 2, 3],
                                    help='Quoting style used in the input CSV file. 0 = Quote Minimal, 1 = Quote All, 2 = Quote Non-numeric, 3 = Quote None.')
    if 'b' not in self.override_flags:
        self.argparser.add_argument('-b', '--no-doublequote', dest='doublequote', action='store_false',
                                    help='Whether or not double quotes are doubled in the input CSV file.')
    if 'p' not in self.override_flags:
        self.argparser.add_argument('-p', '--escapechar', dest='escapechar',
                                    help='Character used to escape the delimiter if --quoting 3 ("Quote None") is specified and to escape the QUOTECHAR if --no-doublequote is specified.')
    if 'z' not in self.override_flags:
        self.argparser.add_argument('-z', '--maxfieldsize', dest='field_size_limit', type=int,
                                    help='Maximum length of a single field in the input CSV file.')
    if 'e' not in self.override_flags:
        self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
                                    help='Specify the encoding of the input CSV file.')
    if 'L' not in self.override_flags:
        self.argparser.add_argument('-L', '--locale', dest='locale', default='en_US',
                                    help='Specify the locale (en_US) of any formatted numbers.')
    if 'S' not in self.override_flags:
        self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', action='store_true',
                                    help='Ignore whitespace immediately following the delimiter.')
    if 'blanks' not in self.override_flags:
        self.argparser.add_argument('--blanks', dest='blanks', action='store_true',
                                    help='Do not convert "", "na", "n/a", "none", "null", "." to NULL.')
    if 'date-format' not in self.override_flags:
        self.argparser.add_argument('--date-format', dest='date_format',
                                    help='Specify a strptime date format string like "%%m/%%d/%%Y".')
    if 'datetime-format' not in self.override_flags:
        self.argparser.add_argument('--datetime-format', dest='datetime_format',
                                    help='Specify a strptime datetime format string like "%%m/%%d/%%Y %%I:%%M %%p".')
    if 'H' not in self.override_flags:
        self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
                                    help='Specify that the input CSV file has no header row. Will create default headers (a,b,c,...).')
    if 'K' not in self.override_flags:
        self.argparser.add_argument('-K', '--skip-lines', dest='skip_lines', type=int, default=0,
                                    help='Specify the number of initial lines to skip (e.g. comments, copyright notices, empty rows).')
    if 'v' not in self.override_flags:
        self.argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                                    help='Print detailed tracebacks when errors occur.')
    if 'l' not in self.override_flags:
        self.argparser.add_argument('-l', '--linenumbers', dest='line_numbers', action='store_true',
                                    help='Insert a column of line numbers at the front of the output. Useful when piping to grep or as a simple primary key.')
    if 'zero' not in self.override_flags:
        self.argparser.add_argument('--zero', dest='zero_based', action='store_true',
                                    help='When interpreting or displaying column numbers, use zero-based numbering instead of the default 1-based numbering.')

    self.argparser.add_argument('-V', '--version', action='version', version='%(prog)s 1.0.3',
                                help='Display version information and exit.')
'Open the input file specified on the command line.'
def _open_input_file(self, path):
    if six.PY2:
        mode = 'rb'
        kwargs = {}
    else:
        mode = 'rt'
        kwargs = {'encoding': self.args.encoding}

    if not path or path == '-':
        if self.buffers_input:
            f = six.StringIO(sys.stdin.read())
        else:
            f = sys.stdin
    else:
        extension = splitext(path)[1]
        if extension == '.gz':
            f = LazyFile(gzip.open, path, mode, **kwargs)
        elif extension == '.bz2':
            if six.PY2:
                f = LazyFile(bz2.BZ2File, path, mode, **kwargs)
            else:
                f = LazyFile(bz2.open, path, mode, **kwargs)
        else:
            f = LazyFile(open, path, mode, **kwargs)

    return f
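LazyFile is referenced but not defined in this listing. Its usage here implies a wrapper that defers opening until the file is first touched; a minimal sketch under that assumption:

class LazyFile(object):
    # Remember how to open the file, but only open it on first attribute access.
    def __init__(self, init, *args, **kwargs):
        self.init = init
        self.args = args
        self.kwargs = kwargs
        self.f = None

    def __getattr__(self, name):
        if self.f is None:
            self.f = self.init(*self.args, **self.kwargs)
        return getattr(self.f, name)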
'Extracts those command-line arguments that should be passed through to the input CSV reader(s).'
def _extract_csv_reader_kwargs(self):
    kwargs = {}

    if self.args.tabs:
        kwargs['delimiter'] = '\t'
    elif self.args.delimiter:
        kwargs['delimiter'] = self.args.delimiter

    for arg in ('quotechar', 'quoting', 'doublequote', 'escapechar', 'field_size_limit', 'skipinitialspace'):
        value = getattr(self.args, arg)
        if value is not None:
            kwargs[arg] = value

    if six.PY2 and self.args.encoding:
        kwargs['encoding'] = self.args.encoding

    if getattr(self.args, 'no_header_row', None):
        kwargs['header'] = not self.args.no_header_row

    return kwargs
'Extracts those command-line arguments that should be passed through to the output CSV writer.'
def _extract_csv_writer_kwargs(self):
    kwargs = {}
    if getattr(self.args, 'line_numbers', None):
        kwargs['line_numbers'] = True
    return kwargs
'Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.'
def _install_exception_handler(self):
    if six.PY2:
        sys.stderr = codecs.getwriter('utf-8')(sys.stderr)

    def handler(t, value, traceback):
        if self.args.verbose:
            sys.__excepthook__(t, value, traceback)
        elif t == UnicodeDecodeError:
            sys.stderr.write('Your file is not "%s" encoded. Please specify the correct encoding '
                             'with the -e flag. Use the -v flag to see the complete error.\n'
                             % self.args.encoding)
        else:
            sys.stderr.write('%s\n' % six.text_type(value))

    sys.excepthook = handler
'Pretty-prints the names and indices of all columns to a file-like object (usually sys.stdout).'
def print_column_names(self):
    if getattr(self.args, 'no_header_row', None):
        raise RequiredHeaderError('You cannot use --no-header-row with the -n or --names options.')

    f = self.input_file
    output = self.output_file

    if getattr(self.args, 'zero_based', None):
        start = 0
    else:
        start = 1

    rows = agate.csv.reader(f, **self.reader_kwargs)
    column_names = next(rows)

    for i, c in enumerate(column_names, start):
        output.write('%3i: %s\n' % (i, c))
'A generator which yields rows which are ready to write to output.'
def checked_rows(self):
    length = len(self.column_names)
    line_number = self.reader.line_num
    joinable_row_errors = []

    for row in self.reader:
        try:
            if len(row) != length:
                raise LengthMismatchError(line_number, row, length)
            yield row
            joinable_row_errors = []
        except LengthMismatchError as e:
            self.errors.append(e)
            if len(row) > length:
                joinable_row_errors = []
            else:
                joinable_row_errors.append(e)
                while joinable_row_errors:
                    fixed_row = join_rows([error.row for error in joinable_row_errors], joiner=' ')
                    if len(fixed_row) < length:
                        break
                    if len(fixed_row) == length:
                        self.rows_joined += len(joinable_row_errors)
                        self.joins += 1
                        yield fixed_row
                        for fixed in joinable_row_errors:
                            joinable_row_errors.remove(fixed)
                            self.errors.remove(fixed)
                        break
                    joinable_row_errors = joinable_row_errors[1:]
        except CSVTestException as e:
            self.errors.append(e)
            joinable_row_errors = []

        line_number = self.reader.line_num
''
def __init__(self, batch_size=1):
    self.batch_size = batch_size
'Build the graph for this configuration. Args: inputs: A dict of inputs. For training, should contain \'wav\'. Returns: A dict of outputs that includes the \'predictions\', \'init_ops\', the \'push_ops\', and the \'quantized_input\'.'
def build(self, inputs):
    num_stages = 10
    num_layers = 30
    filter_length = 3
    width = 512
    skip_width = 256
    num_z = 16

    x = inputs['wav']
    batch_size = self.batch_size

    # Encode the source with 8-bit mu-law.
    x_quantized = utils.mu_law(x)
    x_scaled = tf.cast(x_quantized, tf.float32) / 128.0
    x_scaled = tf.expand_dims(x_scaled, 2)

    encoding = tf.placeholder(name='encoding', shape=[batch_size, num_z], dtype=tf.float32)
    en = tf.expand_dims(encoding, 1)

    init_ops, push_ops = [], []

    l = x_scaled
    l, inits, pushs = utils.causal_linear(x=l, n_inputs=1, n_outputs=width, name='startconv',
                                          rate=1, batch_size=batch_size, filter_length=filter_length)
    for init in inits:
        init_ops.append(init)
    for push in pushs:
        push_ops.append(push)

    # Set up skip connections.
    s = utils.linear(l, width, skip_width, name='skip_start')

    # Residual blocks with skip connections.
    for i in range(num_layers):
        dilation = 2 ** (i % num_stages)
        d, inits, pushs = utils.causal_linear(x=l, n_inputs=width, n_outputs=width * 2,
                                              name='dilatedconv_%d' % (i + 1), rate=dilation,
                                              batch_size=batch_size, filter_length=filter_length)
        for init in inits:
            init_ops.append(init)
        for push in pushs:
            push_ops.append(push)

        d += utils.linear(en, num_z, width * 2, name='cond_map_%d' % (i + 1))

        # Gated activation: split channels into sigmoid and tanh halves.
        assert d.get_shape().as_list()[2] % 2 == 0
        m = d.get_shape().as_list()[2] // 2
        d = tf.sigmoid(d[:, :, :m]) * tf.tanh(d[:, :, m:])

        l += utils.linear(d, width, width, name='res_%d' % (i + 1))
        s += utils.linear(d, width, skip_width, name='skip_%d' % (i + 1))

    s = tf.nn.relu(s)
    s = (utils.linear(s, skip_width, skip_width, name='out1') +
         utils.linear(en, num_z, skip_width, name='cond_map_out1'))
    s = tf.nn.relu(s)

    logits = utils.linear(s, skip_width, 256, name='logits')
    logits = tf.reshape(logits, [-1, 256])
    probs = tf.nn.softmax(logits, name='softmax')

    return {
        'init_ops': init_ops,
        'push_ops': push_ops,
        'predictions': probs,
        'encoding': encoding,
        'quantized_input': x_quantized,
    }
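utils.mu_law is not shown; the subsequent division by 128.0 implies it returns integers in roughly [-128, 128). A sketch of standard mu-law companding consistent with that (an assumption, not necessarily Magenta's exact implementation):

import numpy as np

def mu_law(x, mu=255):
    # Compand audio in [-1, 1], then quantize to signed 8-bit range.
    out = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.clip(np.floor(out * 128), -128, 127).astype(np.int32)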
'Condition the input on the encoding. Args: x: The [mb, length, channels] float tensor input. encoding: The [mb, encoding_length, channels] float tensor encoding. Returns: The output after broadcasting the encoding to x\'s shape and adding them.'
@staticmethod
def _condition(x, encoding):
    mb, length, channels = x.get_shape().as_list()
    enc_mb, enc_length, enc_channels = encoding.get_shape().as_list()
    assert enc_mb == mb
    assert enc_channels == channels

    encoding = tf.reshape(encoding, [mb, enc_length, 1, channels])
    x = tf.reshape(x, [mb, enc_length, -1, channels])
    x += encoding
    x = tf.reshape(x, [mb, length, channels])
    x.set_shape([mb, length, channels])
    return x
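The reshape-and-broadcast in _condition upsamples each encoding frame across the block of timesteps it covers. A numpy demonstration of the same arithmetic (hypothetical small shapes):

import numpy as np

mb, length, channels, enc_length = 1, 8, 2, 4   # 2 timesteps per encoding frame
x = np.zeros((mb, length, channels), dtype=np.float32)
enc = np.arange(mb * enc_length * channels, dtype=np.float32).reshape(mb, enc_length, channels)

out = (x.reshape(mb, enc_length, -1, channels) +
       enc.reshape(mb, enc_length, 1, channels)).reshape(mb, length, channels)

# Both timesteps covered by encoding frame 0 received the same bias.
assert np.array_equal(out[0, 0], out[0, 1])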
'Build the graph for this configuration. Args: inputs: A dict of inputs. For training, should contain \'wav\'. is_training: Whether we are training or not. Not used in this config. Returns: A dict of outputs that includes the \'predictions\', \'loss\', the \'encoding\', the \'quantized_input\', and whatever metrics we want to track for eval.'
def build(self, inputs, is_training):
    del is_training
    num_stages = 10
    num_layers = 30
    filter_length = 3
    width = 512
    skip_width = 256
    ae_num_stages = 10
    ae_num_layers = 30
    ae_filter_length = 3
    ae_width = 128

    # Encode the source with 8-bit mu-law.
    x = inputs['wav']
    x_quantized = utils.mu_law(x)
    x_scaled = tf.cast(x_quantized, tf.float32) / 128.0
    x_scaled = tf.expand_dims(x_scaled, 2)

    # The non-causal temporal encoder.
    en = masked.conv1d(x_scaled, causal=False, num_filters=ae_width,
                       filter_length=ae_filter_length, name='ae_startconv')
    for num_layer in xrange(ae_num_layers):
        dilation = 2 ** (num_layer % ae_num_stages)
        d = tf.nn.relu(en)
        d = masked.conv1d(d, causal=False, num_filters=ae_width, filter_length=ae_filter_length,
                          dilation=dilation, name='ae_dilatedconv_%d' % (num_layer + 1))
        d = tf.nn.relu(d)
        en += masked.conv1d(d, num_filters=ae_width, filter_length=1,
                            name='ae_res_%d' % (num_layer + 1))

    en = masked.conv1d(en, num_filters=self.ae_bottleneck_width, filter_length=1, name='ae_bottleneck')
    en = masked.pool1d(en, self.ae_hop_length, name='ae_pool', mode='avg')
    encoding = en

    # The WaveNet decoder.
    l = masked.shift_right(x_scaled)
    l = masked.conv1d(l, num_filters=width, filter_length=filter_length, name='startconv')

    # Set up skip connections.
    s = masked.conv1d(l, num_filters=skip_width, filter_length=1, name='skip_start')

    # Residual blocks with skip connections.
    for i in xrange(num_layers):
        dilation = 2 ** (i % num_stages)
        d = masked.conv1d(l, num_filters=2 * width, filter_length=filter_length,
                          dilation=dilation, name='dilatedconv_%d' % (i + 1))
        d = self._condition(d, masked.conv1d(en, num_filters=2 * width, filter_length=1,
                                             name='cond_map_%d' % (i + 1)))

        assert d.get_shape().as_list()[2] % 2 == 0
        m = d.get_shape().as_list()[2] // 2
        d_sigmoid = tf.sigmoid(d[:, :, :m])
        d_tanh = tf.tanh(d[:, :, m:])
        d = d_sigmoid * d_tanh

        l += masked.conv1d(d, num_filters=width, filter_length=1, name='res_%d' % (i + 1))
        s += masked.conv1d(d, num_filters=skip_width, filter_length=1, name='skip_%d' % (i + 1))

    s = tf.nn.relu(s)
    s = masked.conv1d(s, num_filters=skip_width, filter_length=1, name='out1')
    s = self._condition(s, masked.conv1d(en, num_filters=skip_width, filter_length=1,
                                         name='cond_map_out1'))
    s = tf.nn.relu(s)

    # Compute the logits and the loss.
    logits = masked.conv1d(s, num_filters=256, filter_length=1, name='logits')
    logits = tf.reshape(logits, [-1, 256])
    probs = tf.nn.softmax(logits, name='softmax')
    x_indices = tf.cast(tf.reshape(x_quantized, [-1]), tf.int32) + 128
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=x_indices, name='nll'),
        0, name='loss')

    return {
        'predictions': probs,
        'loss': loss,
        'eval': {'nll': loss},
        'quantized_input': x_quantized,
        'encoding': encoding,
    }
'Get a single example from the tfrecord file. Args: batch_size: Int, minibatch size. Returns: tf.Example protobuf parsed from tfrecord.'
def get_example(self, batch_size):
    reader = tf.TFRecordReader()
    num_epochs = None if self.is_training else 1
    capacity = batch_size
    path_queue = tf.train.input_producer([self.record_path], num_epochs=num_epochs,
                                         shuffle=self.is_training, capacity=capacity)
    unused_key, serialized_example = reader.read(path_queue)
    features = {
        'note_str': tf.FixedLenFeature([], dtype=tf.string),
        'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
        'velocity': tf.FixedLenFeature([1], dtype=tf.int64),
        'audio': tf.FixedLenFeature([64000], dtype=tf.float32),
        'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
        'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
        'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
    }
    example = tf.parse_single_example(serialized_example, features)
    return example
'Get the Tensor expressions from the reader. Args: batch_size: The integer batch size. length: Number of timesteps of a cropped sample to produce. Returns: A dict of key:tensor pairs. This includes "pitch", "wav", and "key".'
def get_wavenet_batch(self, batch_size, length=64000):
    example = self.get_example(batch_size)
    wav = example['audio']
    wav = tf.slice(wav, [0], [64000])
    pitch = tf.squeeze(example['pitch'])
    key = tf.squeeze(example['note_str'])

    if self.is_training:
        crop = tf.random_crop(wav, [length])
        crop = tf.reshape(crop, [1, length])
        key, crop, pitch = tf.train.shuffle_batch([key, crop, pitch], batch_size,
                                                  num_threads=4,
                                                  capacity=500 * batch_size,
                                                  min_after_dequeue=200 * batch_size)
    else:
        offset = (64000 - length) // 2
        crop = tf.slice(wav, [offset], [length])
        crop = tf.reshape(crop, [1, length])
        key, crop, pitch = tf.train.shuffle_batch([key, crop, pitch], batch_size,
                                                  num_threads=4,
                                                  capacity=500 * batch_size,
                                                  min_after_dequeue=200 * batch_size)

    crop = tf.reshape(tf.cast(crop, tf.float32), [batch_size, length])
    pitch = tf.cast(pitch, tf.int32)
    return {'pitch': pitch, 'wav': crop, 'key': key}
'Get the Tensor expressions from the reader. Args: hparams: Hyperparameters object with specgram parameters. Returns: A dict of key:tensor pairs. This includes "pitch", "wav", and "key".'
def get_baseline_batch(self, hparams):
    example = self.get_example(hparams.batch_size)
    audio = tf.slice(example['audio'], [0], [64000])
    audio = tf.reshape(audio, [1, 64000])
    pitch = tf.slice(example['pitch'], [0], [1])
    velocity = tf.slice(example['velocity'], [0], [1])
    instrument_source = tf.slice(example['instrument_source'], [0], [1])
    instrument_family = tf.slice(example['instrument_family'], [0], [1])
    qualities = tf.slice(example['qualities'], [0], [10])
    qualities = tf.reshape(qualities, [1, 10])

    # Compute the spectrogram, if requested.
    hop_length = hparams.hop_length
    n_fft = hparams.n_fft
    if hop_length and n_fft:
        specgram = utils.tf_specgram(audio, n_fft=n_fft, hop_length=hop_length,
                                     mask=hparams.mask, log_mag=hparams.log_mag,
                                     re_im=hparams.re_im, dphase=hparams.dphase,
                                     mag_only=hparams.mag_only)
        shape = [1] + SPECGRAM_REGISTRY[(n_fft, hop_length)]
        if hparams.mag_only:
            shape[-1] = 1
        specgram = tf.reshape(specgram, shape)
        tf.logging.info('SPECGRAM BEFORE PADDING', specgram)

        if hparams.pad:
            # Pad the time axis up to the next power of two, then trim a frame.
            num_padding = 2 ** int(np.ceil(np.log(shape[2]) / np.log(2))) - shape[2]
            tf.logging.info('num_padding: %d' % num_padding)
            specgram = tf.reshape(specgram, shape)
            specgram = tf.pad(specgram, [[0, 0], [0, 0], [0, num_padding], [0, 0]])
            specgram = tf.slice(specgram, [0, 0, 0, 0], [-1, shape[1] - 1, -1, -1])
            tf.logging.info('SPECGRAM AFTER PADDING', specgram)

    # Form a batch.
    if self.is_training:
        (audio, velocity, pitch, specgram,
         instrument_source, instrument_family, qualities) = tf.train.shuffle_batch(
            [audio, velocity, pitch, specgram, instrument_source, instrument_family, qualities],
            batch_size=hparams.batch_size,
            capacity=20 * hparams.batch_size,
            min_after_dequeue=10 * hparams.batch_size,
            enqueue_many=True)
    elif hparams.batch_size > 1:
        (audio, velocity, pitch, specgram,
         instrument_source, instrument_family, qualities) = tf.train.batch(
            [audio, velocity, pitch, specgram, instrument_source, instrument_family, qualities],
            batch_size=hparams.batch_size,
            capacity=10 * hparams.batch_size,
            enqueue_many=True)

    audio.set_shape([hparams.batch_size, 64000])

    batch = dict(pitch=pitch, velocity=velocity, audio=audio,
                 instrument_source=instrument_source,
                 instrument_family=instrument_family,
                 qualities=qualities, spectrogram=specgram)
    return batch
'Generate a melody from a primer melody and backing chords. Args: primer_melody: The primer melody, a Melody object. Should be the same length as the primer chords. backing_chords: The backing chords, a ChordProgression object. Must be at least as long as the primer melody. The melody will be extended to match the length of the backing chords. temperature: A float specifying how much to divide the logits by before computing the softmax. Greater than 1.0 makes melodies more random, less than 1.0 makes melodies less random. beam_size: An integer, beam size to use when generating melodies via beam search. branch_factor: An integer, beam search branch factor to use. steps_per_iteration: An integer, number of melody steps to take per beam search iteration. Returns: The generated Melody object (which begins with the provided primer melody).'
def generate_melody(self, primer_melody, backing_chords, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1):
    melody = copy.deepcopy(primer_melody)
    chords = copy.deepcopy(backing_chords)
    transpose_amount = melody.squash(self._config.min_note, self._config.max_note,
                                     self._config.transpose_to_key)
    chords.transpose(transpose_amount)
    num_steps = len(chords)
    melody = self._generate_events(num_steps, melody, temperature, beam_size, branch_factor,
                                   steps_per_iteration, control_events=chords)
    melody.transpose(-transpose_amount)
    return melody
'Evaluate the log likelihood of a melody conditioned on backing chords. Args: melody: The Melody object for which to evaluate the log likelihood. backing_chords: The backing chords, a ChordProgression object. Returns: The log likelihood of `melody` conditioned on `backing_chords` under this model.'
def melody_log_likelihood(self, melody, backing_chords):
    melody_copy = copy.deepcopy(melody)
    chords_copy = copy.deepcopy(backing_chords)
    transpose_amount = melody_copy.squash(self._config.min_note, self._config.max_note,
                                          self._config.transpose_to_key)
    chords_copy.transpose(transpose_amount)
    return self._evaluate_log_likelihood([melody_copy], control_events=chords_copy)[0]
'Constructs an EncoderPipeline. Args: config: An ImprovRnnConfig that specifies the encoder/decoder, pitch range, and transposition behavior. name: A unique pipeline name.'
def __init__(self, config, name):
    super(EncoderPipeline, self).__init__(input_type=magenta.music.LeadSheet,
                                          output_type=tf.train.SequenceExample,
                                          name=name)
    self._conditional_encoder_decoder = config.encoder_decoder
    self._min_note = config.min_note
    self._max_note = config.max_note
    self._transpose_to_key = config.transpose_to_key
'Creates an ImprovRnnSequenceGenerator. Args: model: Instance of ImprovRnnModel. details: A generator_pb2.GeneratorDetails for this generator. steps_per_quarter: What precision to use when quantizing the melody and chords. How many steps per quarter note. checkpoint: Where to search for the most recent model checkpoint. Mutually exclusive with `bundle`. bundle: A GeneratorBundle object that includes both the model checkpoint and metagraph. Mutually exclusive with `checkpoint`.'
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None, bundle=None):
    super(ImprovRnnSequenceGenerator, self).__init__(model, details, checkpoint, bundle)
    self.steps_per_quarter = steps_per_quarter
'Generate a polyphonic track from a primer polyphonic track. Args: num_steps: The integer length in steps of the final track, after generation. Includes the primer. primer_sequence: The primer sequence, a PolyphonicSequence object. temperature: A float specifying how much to divide the logits by before computing the softmax. Greater than 1.0 makes tracks more random, less than 1.0 makes tracks less random. beam_size: An integer, beam size to use when generating tracks via beam search. branch_factor: An integer, beam search branch factor to use. steps_per_iteration: An integer, number of steps to take per beam search iteration. modify_events_callback: An optional callback for modifying the event list. Can be used to inject events rather than having them generated. If not None, will be called with 3 arguments after every event: the current EventSequenceEncoderDecoder, a list of current EventSequences, and a list of current encoded event inputs. Returns: The generated PolyphonicSequence object (which begins with the provided primer track).'
def generate_polyphonic_sequence(self, num_steps, primer_sequence, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, modify_events_callback=None):
    return self._generate_events(num_steps, primer_sequence, temperature, beam_size,
                                 branch_factor, steps_per_iteration,
                                 modify_events_callback=modify_events_callback)
'Evaluate the log likelihood of a polyphonic sequence. Args: sequence: The PolyphonicSequence object for which to evaluate the log likelihood. Returns: The log likelihood of `sequence` under this model.'
def polyphonic_sequence_log_likelihood(self, sequence):
    return self._evaluate_log_likelihood([sequence])[0]
'Construct a PolyphonicSequence. Either quantized_sequence or steps_per_quarter should be supplied. Args: quantized_sequence: a quantized NoteSequence proto. steps_per_quarter: how many steps a quarter note represents. start_step: The offset of this sequence relative to the beginning of the source sequence. If a quantized sequence is used as input, only notes starting after this step will be considered.'
def __init__(self, quantized_sequence=None, steps_per_quarter=None, start_step=0):
    assert (quantized_sequence, steps_per_quarter).count(None) == 1

    if quantized_sequence:
        sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
        self._events = self._from_quantized_sequence(quantized_sequence, start_step)
        self._steps_per_quarter = quantized_sequence.quantization_info.steps_per_quarter
    else:
        self._events = [PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]
        self._steps_per_quarter = steps_per_quarter

    self._start_step = start_step
'Removes the trailing END event if present. Should be called before using a sequence to prime generation.'
def trim_trailing_end_events(self):
    while self._events[-1].event_type == PolyphonicEvent.END:
        del self._events[-1]
'Adds steps of silence to the end of the sequence.'
def _append_silence_steps(self, num_steps):
    for _ in range(num_steps):
        self._events.append(PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))
'Trims a given number of steps from the end of the sequence.'
def _trim_steps(self, num_steps):
    steps_trimmed = 0
    for i in reversed(range(len(self._events))):
        if self._events[i].event_type == PolyphonicEvent.STEP_END:
            if steps_trimmed == num_steps:
                del self._events[i + 1:]
                break
            steps_trimmed += 1
        elif i == 0:
            self._events = [PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]
            break
'Sets the length of the sequence to the specified number of steps. If the event sequence is not long enough, pads with silence to make the sequence the specified length. If it is too long, it will be truncated to the requested length. Note that this will append a STEP_END event to the end of the sequence if there is an unfinished step. Args: steps: How many quantized steps long the event sequence should be. from_left: Whether to add/remove from the left instead of right.'
def set_length(self, steps, from_left=False):
    if from_left:
        raise NotImplementedError('from_left is not supported')

    # Remove the trailing END event, then close any incomplete step.
    self.trim_trailing_end_events()
    self._events.append(PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))

    if self.num_steps < steps:
        self._append_silence_steps(steps - self.num_steps)
    elif self.num_steps > steps:
        self._trim_steps(self.num_steps - steps)

    self._events.append(PolyphonicEvent(event_type=PolyphonicEvent.END, pitch=None))
    assert self.num_steps == steps
'Appends the event to the end of the sequence. Args: event: The polyphonic event to append to the end. Raises: ValueError: If `event` is not a valid polyphonic event.'
def append(self, event):
    if not isinstance(event, PolyphonicEvent):
        raise ValueError('Invalid polyphonic event: %s' % event)
    self._events.append(event)
'How many events are in this sequence. Returns: Number of events as an integer.'
def __len__(self):
    return len(self._events)
'Returns the event at the given index.'
def __getitem__(self, i):
    return self._events[i]
'Return an iterator over the events in this sequence.'
def __iter__(self):
    return iter(self._events)
'Returns how many steps long this sequence is. Does not count incomplete steps (i.e., steps that do not have a terminating STEP_END event). Returns: Length of the sequence in quantized steps.'
@property
def num_steps(self):
    steps = 0
    for event in self:
        if event.event_type == PolyphonicEvent.STEP_END:
            steps += 1
    return steps
'Populate self with events from the given quantized NoteSequence object. Sequences start with START. Within a step, new pitches are started with NEW_NOTE and existing pitches are continued with CONTINUED_NOTE. A step is ended with STEP_END. If an active pitch is not continued, it is considered to have ended. Sequences end with END. Args: quantized_sequence: A quantized NoteSequence instance. start_step: Start converting the sequence at this time step. Assumed to be the beginning of a bar. Returns: A list of events.'
@staticmethod
def _from_quantized_sequence(quantized_sequence, start_step=0):
    pitch_start_steps = collections.defaultdict(list)
    pitch_end_steps = collections.defaultdict(list)

    for note in quantized_sequence.notes:
        if note.quantized_start_step < start_step:
            continue
        pitch_start_steps[note.quantized_start_step].append(note.pitch)
        pitch_end_steps[note.quantized_end_step].append(note.pitch)

    events = [PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]

    active_pitches = []
    for step in range(start_step, quantized_sequence.total_quantized_steps):
        step_events = []

        for pitch in pitch_end_steps[step]:
            active_pitches.remove(pitch)
        for pitch in active_pitches:
            step_events.append(PolyphonicEvent(event_type=PolyphonicEvent.CONTINUED_NOTE, pitch=pitch))
        for pitch in pitch_start_steps[step]:
            active_pitches.append(pitch)
            step_events.append(PolyphonicEvent(event_type=PolyphonicEvent.NEW_NOTE, pitch=pitch))

        events.extend(sorted(step_events, key=lambda e: e.pitch, reverse=True))
        events.append(PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))

    events.append(PolyphonicEvent(event_type=PolyphonicEvent.END, pitch=None))
    return events
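To make the encoding concrete: for a two-step fragment in which pitch 60 sounds on both steps and pitch 64 only on the first, this method would produce the event list below (hand-derived; PolyphonicEvent is mirrored with a namedtuple for illustration):

import collections

PolyphonicEvent = collections.namedtuple('PolyphonicEvent', ['event_type', 'pitch'])
START, END, STEP_END, NEW_NOTE, CONTINUED_NOTE = range(5)  # hypothetical constants

events = [
    PolyphonicEvent(START, None),
    PolyphonicEvent(NEW_NOTE, 64),        # new pitches within a step, sorted high to low
    PolyphonicEvent(NEW_NOTE, 60),
    PolyphonicEvent(STEP_END, None),
    PolyphonicEvent(CONTINUED_NOTE, 60),  # 64 is not continued, so it ends here
    PolyphonicEvent(STEP_END, None),
    PolyphonicEvent(END, None),
]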
'Converts the PolyphonicSequence to a NoteSequence proto. Assumes that the sequence ends with a STEP_END followed by an END event. To ensure this is true, call set_length before calling this method. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. qpm: Quarter notes per minute (float). base_note_sequence: A NoteSequence to use as a starting point. Must match the specified qpm. Raises: ValueError: if an unknown event is encountered. Returns: A NoteSequence proto.'
def to_sequence(self, velocity=100, instrument=0, program=0, qpm=constants.DEFAULT_QUARTERS_PER_MINUTE, base_note_sequence=None):
    seconds_per_step = 60.0 / qpm / self._steps_per_quarter
    sequence_start_time = self.start_step * seconds_per_step

    if base_note_sequence:
        sequence = copy.deepcopy(base_note_sequence)
        if sequence.tempos[0].qpm != qpm:
            raise ValueError('Supplied QPM (%d) does not match QPM of base_note_sequence (%d)'
                             % (qpm, sequence.tempos[0].qpm))
    else:
        sequence = music_pb2.NoteSequence()
        sequence.tempos.add().qpm = qpm
        sequence.ticks_per_quarter = STANDARD_PPQ

    step = 0
    pitch_start_steps = []
    pitches_to_end = []

    for i, event in enumerate(self):
        if event.event_type == PolyphonicEvent.START:
            if i != 0:
                tf.logging.debug('Ignoring START marker not at beginning of sequence at position %d' % i)
        elif event.event_type == PolyphonicEvent.END and i < len(self) - 1:
            tf.logging.debug('Ignoring END marker before end of sequence at position %d' % i)
        elif event.event_type == PolyphonicEvent.NEW_NOTE:
            pitch_start_steps.append((event.pitch, step))
        elif event.event_type == PolyphonicEvent.CONTINUED_NOTE:
            try:
                pitches_to_end.remove(event.pitch)
            except ValueError:
                tf.logging.debug('Attempted to continue pitch %s at step %s, but pitch was not '
                                 'active. Ignoring.' % (event.pitch, step))
        elif (event.event_type == PolyphonicEvent.STEP_END or
              event.event_type == PolyphonicEvent.END):
            # Close out any active pitches that were not continued this step.
            for pitch_start_step in list(pitch_start_steps):
                if pitch_start_step[0] in pitches_to_end:
                    pitches_to_end.remove(pitch_start_step[0])
                    pitch_start_steps.remove(pitch_start_step)

                    note = sequence.notes.add()
                    note.start_time = pitch_start_step[1] * seconds_per_step + sequence_start_time
                    note.end_time = step * seconds_per_step + sequence_start_time
                    note.pitch = pitch_start_step[0]
                    note.velocity = velocity
                    note.instrument = instrument
                    note.program = program

            assert not pitches_to_end
            step += 1
            pitches_to_end = [ps[0] for ps in pitch_start_steps]
        else:
            raise ValueError('Unknown event type: %s' % event.event_type)

    if pitch_start_steps:
        raise ValueError('Sequence ended, but not all pitches were ended. This likely means the '
                         'sequence was missing a STEP_END event before the end of the sequence. '
                         'To ensure a well-formed sequence, call set_length first.')

    sequence.total_time = seconds_per_step * (step - 1) + sequence_start_time
    if sequence.notes:
        assert sequence.total_time >= sequence.notes[-1].end_time

    return sequence
'Creates a PolyphonyRnnSequenceGenerator. Args: model: Instance of PolyphonyRnnModel. details: A generator_pb2.GeneratorDetails for this generator. steps_per_quarter: What precision to use when quantizing the sequence. How many steps per quarter note. checkpoint: Where to search for the most recent model checkpoint. Mutually exclusive with `bundle`. bundle: A GeneratorBundle object that includes both the model checkpoint and metagraph. Mutually exclusive with `checkpoint`.'
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None, bundle=None):
super(PolyphonyRnnSequenceGenerator, self).__init__(model, details, checkpoint, bundle) self.steps_per_quarter = steps_per_quarter
'Generate a drum track from a primer drum track. Args: num_steps: The integer length in steps of the final drum track, after generation. Includes the primer. primer_drums: The primer drum track, a DrumTrack object. temperature: A float specifying how much to divide the logits by before computing the softmax. Greater than 1.0 makes drum tracks more random, less than 1.0 makes drum tracks less random. beam_size: An integer, beam size to use when generating drum tracks via beam search. branch_factor: An integer, beam search branch factor to use. steps_per_iteration: An integer, number of steps to take per beam search iteration. Returns: The generated DrumTrack object (which begins with the provided primer drum track).'
def generate_drum_track(self, num_steps, primer_drums, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1):
return self._generate_events(num_steps, primer_drums, temperature, beam_size, branch_factor, steps_per_iteration)
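A minimal usage sketch, assuming drums_rnn_model is a trained DrumsRnnModel; the primer is a single bass-drum hit (drum events are frozensets of MIDI pitches).

import magenta.music as mm

primer = mm.DrumTrack([frozenset([36])])  # one bass-drum hit
drums = drums_rnn_model.generate_drum_track(
    num_steps=64, primer_drums=primer, temperature=1.1, beam_size=3)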
'Evaluate the log likelihood of a drum track under the model. Args: drums: The DrumTrack object for which to evaluate the log likelihood. Returns: The log likelihood of `drums` under this model.'
def drum_track_log_likelihood(self, drums):
return self._evaluate_log_likelihood([drums])[0]
'Creates a DrumsRnnSequenceGenerator. Args: model: Instance of DrumsRnnModel. details: A generator_pb2.GeneratorDetails for this generator. steps_per_quarter: What precision to use when quantizing the melody. How many steps per quarter note. checkpoint: Where to search for the most recent model checkpoint. Mutually exclusive with `bundle`. bundle: A GeneratorBundle object that includes both the model checkpoint and metagraph. Mutually exclusive with `checkpoint`.'
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None, bundle=None):
super(DrumsRnnSequenceGenerator, self).__init__(model, details, checkpoint, bundle) self.steps_per_quarter = steps_per_quarter
'Constructs an EncoderPipeline. Args: config: A MelodyRnnConfig that specifies the encoder/decoder, pitch range, and what key to transpose into. name: A unique pipeline name.'
def __init__(self, config, name):
super(EncoderPipeline, self).__init__(input_type=magenta.music.Melody, output_type=tf.train.SequenceExample, name=name) self._melody_encoder_decoder = config.encoder_decoder self._min_note = config.min_note self._max_note = config.max_note self._transpose_to_key = config.transpose_to_key
'Creates a MelodyRnnSequenceGenerator. Args: model: Instance of MelodyRnnModel. details: A generator_pb2.GeneratorDetails for this generator. steps_per_quarter: What precision to use when quantizing the melody. How many steps per quarter note. checkpoint: Where to search for the most recent model checkpoint. Mutually exclusive with `bundle`. bundle: A GeneratorBundle object that includes both the model checkpoint and metagraph. Mutually exclusive with `checkpoint`.'
def __init__(self, model, details, steps_per_quarter=4, checkpoint=None, bundle=None):
super(MelodyRnnSequenceGenerator, self).__init__(model, details, checkpoint, bundle) self.steps_per_quarter = steps_per_quarter
'Generate a melody from a primer melody. Args: num_steps: The integer length in steps of the final melody, after generation. Includes the primer. primer_melody: The primer melody, a Melody object. temperature: A float specifying how much to divide the logits by before computing the softmax. Greater than 1.0 makes melodies more random, less than 1.0 makes melodies less random. beam_size: An integer, beam size to use when generating melodies via beam search. branch_factor: An integer, beam search branch factor to use. steps_per_iteration: An integer, number of melody steps to take per beam search iteration. Returns: The generated Melody object (which begins with the provided primer melody).'
def generate_melody(self, num_steps, primer_melody, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1):
melody = copy.deepcopy(primer_melody) transpose_amount = melody.squash(self._config.min_note, self._config.max_note, self._config.transpose_to_key) melody = self._generate_events(num_steps, melody, temperature, beam_size, branch_factor, steps_per_iteration) melody.transpose((- transpose_amount)) return melody
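Note the round trip: the primer is squashed into the model's pitch range and key before generation, and the result is transposed back by the same amount, so the returned melody stays in the primer's original register. A hedged usage sketch, assuming melody_rnn_model is a trained MelodyRnnModel:

import magenta.music as mm

primer = mm.Melody([60, 62, 64, 65])  # C D E F
melody = melody_rnn_model.generate_melody(
    num_steps=128, primer_melody=primer, temperature=1.0, beam_size=1)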
'Evaluate the log likelihood of a melody under the model. Args: melody: The Melody object for which to evaluate the log likelihood. Returns: The log likelihood of `melody` under this model.'
def melody_log_likelihood(self, melody):
melody_copy = copy.deepcopy(melody) melody_copy.squash(self._config.min_note, self._config.max_note, self._config.transpose_to_key) return self._evaluate_log_likelihood([melody_copy])[0]
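Since each call squashes a copy of the melody the same way, log likelihoods are comparable across candidates; a hedged sketch ranking two melodies under the model:

import magenta.music as mm
import numpy as np

candidates = [mm.Melody([60, 62, 64]), mm.Melody([60, 61, 66])]
logliks = [melody_rnn_model.melody_log_likelihood(m) for m in candidates]
best = candidates[int(np.argmax(logliks))]  # higher log likelihood = more probable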
'Initialize the EventSequenceRnnModel. Args: config: An EventSequenceRnnConfig containing the encoder/decoder and HParams to use.'
def __init__(self, config):
super(EventSequenceRnnModel, self).__init__() self._config = config
'Extracts the batch size from the graph.'
def _batch_size(self):
return self._session.graph.get_collection('inputs')[0].shape[0].value
'Extends a batch of event sequences by a single step each. This method modifies the event sequences in place. Args: event_sequences: A list of event sequences, each of which is a Python list-like object. The list of event sequences should have length equal to `self._batch_size()`. These are extended by this method. inputs: A Python list of model inputs, with length equal to `self._batch_size()`. initial_state: A numpy array containing the initial RNN state, where `initial_state.shape[0]` is equal to `self._batch_size()`. temperature: The softmax temperature. Returns: final_state: The final RNN state, a numpy array the same size as `initial_state`. loglik: The log-likelihood of the chosen softmax value for each event sequence, a 1-D numpy array of length `self._batch_size()`. If `inputs` is a full-length inputs batch, the log-likelihood of each entire sequence up to and including the generated step will be computed and returned.'
def _generate_step_for_batch(self, event_sequences, inputs, initial_state, temperature):
assert (len(event_sequences) == self._batch_size()) graph_inputs = self._session.graph.get_collection('inputs')[0] graph_initial_state = self._session.graph.get_collection('initial_state') graph_final_state = self._session.graph.get_collection('final_state') graph_softmax = self._session.graph.get_collection('softmax')[0] graph_temperature = self._session.graph.get_collection('temperature') feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state} if graph_temperature: feed_dict[graph_temperature[0]] = temperature (final_state, softmax) = self._session.run([graph_final_state, graph_softmax], feed_dict) if (softmax.shape[1] > 1): loglik = self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax[:, :(-1), :]) else: loglik = np.zeros(len(event_sequences)) indices = self._config.encoder_decoder.extend_event_sequences(event_sequences, softmax) p = softmax[(range(len(event_sequences)), (-1), indices)] return (final_state, (loglik + np.log(p)))
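The log-likelihood update at the end of this body is plain fancy indexing over the softmax output; a small numpy sketch with assumed shapes (batch, time, classes):

import numpy as np

batch, classes = 2, 5
softmax = np.random.dirichlet(np.ones(classes), size=(batch, 1))  # (batch, 1, classes)
indices = np.array([3, 0])                  # event chosen for each sequence
p = softmax[np.arange(batch), -1, indices]  # probability of each chosen event
loglik_update = np.log(p)                   # added to each running log-likelihood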
'Extends a list of event sequences by a single step each. This method modifies the event sequences in place. It also returns the modified event sequences and updated model states and log-likelihoods. Args: event_sequences: A list of event sequence objects, which are extended by this method. model_states: A list of model states, each of which contains model inputs and initial RNN states. logliks: A list containing the current log-likelihood for each event sequence. temperature: The softmax temperature. extend_control_events_callback: A function that takes three arguments: a current control event sequence, a current generated event sequence, and the control state. The function should a) extend the control event sequence to be one longer than the generated event sequence (or do nothing if it is already at least this long), and b) return the resulting control state. modify_events_callback: An optional callback for modifying the event list. Can be used to inject events rather than having them generated. If not None, will be called with 3 arguments after every event: the current EventSequenceEncoderDecoder, a list of current EventSequences, and a list of current encoded event inputs. Returns: event_sequences: A list of extended event sequences. These are modified in place but also returned. final_states: A list of resulting model states, containing model inputs for the next step along with RNN states for each event sequence. logliks: A list containing the updated log-likelihood for each event sequence.'
def _generate_step(self, event_sequences, model_states, logliks, temperature, extend_control_events_callback=None, modify_events_callback=None):
batch_size = self._batch_size() num_seqs = len(event_sequences) num_batches = int(np.ceil((num_seqs / float(batch_size)))) inputs = [model_state.inputs for model_state in model_states] initial_states = [model_state.rnn_state for model_state in model_states] control_sequences = [model_state.control_events for model_state in model_states] control_states = [model_state.control_state for model_state in model_states] final_states = [] logliks = np.array(logliks, dtype=np.float32) pad_amt = ((- len(event_sequences)) % batch_size) padded_event_sequences = (event_sequences + [copy.deepcopy(event_sequences[(-1)]) for _ in range(pad_amt)]) padded_inputs = (inputs + ([inputs[(-1)]] * pad_amt)) padded_initial_states = (initial_states + ([initial_states[(-1)]] * pad_amt)) for b in range(num_batches): (i, j) = ((b * batch_size), ((b + 1) * batch_size)) pad_amt = max(0, (j - num_seqs)) (batch_final_state, batch_loglik) = self._generate_step_for_batch(padded_event_sequences[i:j], padded_inputs[i:j], state_util.batch(padded_initial_states[i:j], batch_size), temperature) final_states += state_util.unbatch(batch_final_state, batch_size)[:((j - i) - pad_amt)] logliks[i:(j - pad_amt)] += batch_loglik[:((j - i) - pad_amt)] if (extend_control_events_callback is not None): for idx in range(len(control_sequences)): control_states[idx] = extend_control_events_callback(control_sequences[idx], event_sequences[idx], control_states[idx]) next_inputs = self._config.encoder_decoder.get_inputs_batch(control_sequences, event_sequences) else: next_inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences) if modify_events_callback: modify_events_callback(self._config.encoder_decoder, event_sequences, next_inputs) model_states = [ModelState(inputs=inputs, rnn_state=final_state, control_events=control_events, control_state=control_state) for (inputs, final_state, control_events, control_state) in zip(next_inputs, final_states, control_sequences, control_states)] return (event_sequences, model_states, logliks)
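The padding arithmetic here is the usual trick for pushing a ragged number of sequences through a fixed-size batch; the two expressions below match the ceil-division and modulo used in the body:

batch_size, num_seqs = 4, 6
pad_amt = -num_seqs % batch_size                  # 2 filler sequences for the last batch
num_batches = (num_seqs + pad_amt) // batch_size  # 2, same as ceil(6 / 4)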
'Generate an event sequence from a primer sequence. Args: num_steps: The integer length in steps of the final event sequence, after generation. Includes the primer. primer_events: The primer event sequence, a Python list-like object. temperature: A float specifying how much to divide the logits by before computing the softmax. Greater than 1.0 makes events more random, less than 1.0 makes events less random. beam_size: An integer, beam size to use when generating event sequences via beam search. branch_factor: An integer, beam search branch factor to use. steps_per_iteration: An integer, number of steps to take per beam search iteration. control_events: A sequence of control events upon which to condition the generation. If not None, the encoder/decoder should be a ConditionalEventSequenceEncoderDecoder, and the control events will be used along with the target sequence to generate model inputs. In some cases, the control event sequence cannot be fully-determined as later control events depend on earlier generated events; use the `extend_control_events_callback` argument to provide a function that extends the control event sequence. control_state: Initial state used by `extend_control_events_callback`. extend_control_events_callback: A function that takes three arguments: a current control event sequence, a current generated event sequence, and the control state. The function should a) extend the control event sequence to be one longer than the generated event sequence (or do nothing if it is already at least this long), and b) return the resulting control state. modify_events_callback: An optional callback for modifying the event list. Can be used to inject events rather than having them generated. If not None, will be called with 3 arguments after every event: the current EventSequenceEncoderDecoder, a list of current EventSequences, and a list of current encoded event inputs. Returns: The generated event sequence (which begins with the provided primer). Raises: EventSequenceRnnModelException: If the primer sequence has zero length or is not shorter than num_steps.'
def _generate_events(self, num_steps, primer_events, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, control_events=None, control_state=None, extend_control_events_callback=_extend_control_events_default, modify_events_callback=None):
if ((control_events is not None) and (not isinstance(self._config.encoder_decoder, mm.ConditionalEventSequenceEncoderDecoder))): raise EventSequenceRnnModelException('control sequence provided but encoder/decoder is not a ConditionalEventSequenceEncoderDecoder') if ((control_events is not None) and (extend_control_events_callback is None)): raise EventSequenceRnnModelException('must provide callback for extending control sequence (or use default)') if (not primer_events): raise EventSequenceRnnModelException('primer sequence must have non-zero length') if (len(primer_events) >= num_steps): raise EventSequenceRnnModelException('primer sequence must be shorter than `num_steps`') event_sequences = [copy.deepcopy(primer_events)] if (control_events is not None): control_state = extend_control_events_callback(control_events, primer_events, control_state) inputs = self._config.encoder_decoder.get_inputs_batch([control_events], event_sequences, full_length=True) else: inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences, full_length=True) if modify_events_callback: modify_events_callback(self._config.encoder_decoder, event_sequences, inputs) graph_initial_state = self._session.graph.get_collection('initial_state') initial_states = state_util.unbatch(self._session.run(graph_initial_state)) initial_state = ModelState(inputs=inputs[0], rnn_state=initial_states[0], control_events=control_events, control_state=control_state) (events, _, loglik) = beam_search(initial_sequence=event_sequences[0], initial_state=initial_state, generate_step_fn=functools.partial(self._generate_step, temperature=temperature, extend_control_events_callback=(extend_control_events_callback if (control_events is not None) else None), modify_events_callback=modify_events_callback), num_steps=(num_steps - len(primer_events)), beam_size=beam_size, branch_factor=branch_factor, steps_per_iteration=steps_per_iteration) tf.logging.info('Beam search yields sequence with log-likelihood: %f ', loglik) return events
'Evaluates the log likelihood of a batch of event sequences. Args: event_sequences: A list of event sequences, each of which is a Python list-like object. The list of event sequences should have length equal to `self._batch_size()`. inputs: A Python list of model inputs, with length equal to `self._batch_size()`. initial_state: A numpy array containing the initial RNN state, where `initial_state.shape[0]` is equal to `self._batch_size()`. Returns: A Python list containing the log likelihood of each sequence in `event_sequences`.'
def _evaluate_batch_log_likelihood(self, event_sequences, inputs, initial_state):
graph_inputs = self._session.graph.get_collection('inputs')[0] graph_initial_state = self._session.graph.get_collection('initial_state') graph_softmax = self._session.graph.get_collection('softmax')[0] graph_temperature = self._session.graph.get_collection('temperature') feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state} if graph_temperature: feed_dict[graph_temperature[0]] = 1.0 softmax = self._session.run(graph_softmax, feed_dict) return self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax)
'Evaluate log likelihood for a list of event sequences of the same length. Args: event_sequences: A list of event sequences for which to evaluate the log likelihood. control_events: A sequence of control events upon which to condition the event sequences. If not None, the encoder/decoder should be a ConditionalEventSequenceEncoderDecoder, and the log likelihood of each event sequence will be computed conditional on the control sequence. Returns: The log likelihood of each sequence in `event_sequences`. Raises: EventSequenceRnnModelException: If the event sequences are not all the same length, or if the control sequence is shorter than the event sequences.'
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
num_steps = len(event_sequences[0]) for events in event_sequences[1:]: if (len(events) != num_steps): raise EventSequenceRnnModelException('log likelihood evaluation requires all event sequences to have the same length') if ((control_events is not None) and (len(control_events) < num_steps)): raise EventSequenceRnnModelException('control sequence must be at least as long as the event sequences') batch_size = self._batch_size() num_full_batches = (len(event_sequences) // batch_size) loglik = np.empty(len(event_sequences)) if (control_events is not None): inputs = self._config.encoder_decoder.get_inputs_batch(([control_events] * len(event_sequences)), [events[:(-1)] for events in event_sequences], full_length=True) else: inputs = self._config.encoder_decoder.get_inputs_batch([events[:(-1)] for events in event_sequences], full_length=True) graph_initial_state = self._session.graph.get_collection('initial_state') initial_state = ([self._session.run(graph_initial_state)] * len(event_sequences)) offset = 0 for _ in range(num_full_batches): batch_indices = range(offset, (offset + batch_size)) batch_loglik = self._evaluate_batch_log_likelihood([event_sequences[i] for i in batch_indices], [inputs[i] for i in batch_indices], initial_state[batch_indices]) loglik[batch_indices] = batch_loglik offset += batch_size if (offset < len(event_sequences)): num_extra = (len(event_sequences) - offset) pad_size = (batch_size - num_extra) batch_indices = range(offset, len(event_sequences)) batch_loglik = self._evaluate_batch_log_likelihood(([event_sequences[i] for i in batch_indices] + [copy.deepcopy(event_sequences[(-1)]) for _ in range(pad_size)]), ([inputs[i] for i in batch_indices] + ([inputs[(-1)]] * pad_size)), np.append(initial_state[batch_indices], np.tile(inputs[(-1), :], (pad_size, 1)), axis=0)) loglik[batch_indices] = batch_loglik[0:num_extra] return loglik
'Initializes the MelodyQNetwork class. Args: output_dir: Where the model will save its compositions (midi files). dqn_hparams: A HParams object containing the hyperparameters of the DQN algorithm, including minibatch size, exploration probability, etc. reward_mode: Controls which reward function can be applied. There are several, including \'scale\', which teaches the model to play a scale, and of course \'music_theory_all\', which is a music-theory-based reward function composed of other functions. reward_scaler: Controls the emphasis placed on the music theory rewards. This value is the inverse of \'c\' in the academic paper. exploration_mode: Can be \'egreedy\', which is an epsilon-greedy policy, or \'boltzmann\', in which the model will sample from its output distribution to choose the next action. priming_mode: Each time the model begins a new composition, it is primed with either a random note (\'random_note\'), a random MIDI file from the training data (\'random_midi\'), or a particular MIDI file (\'single_midi\'). stochastic_observations: If False, the note that the model chooses to play next (the argmax of its softmax probabilities) deterministically becomes the next note it will observe. If True, the next observation will be sampled from the model\'s softmax output. algorithm: Can be \'q\' (the default), \'psi\', \'g\', or \'pure_rl\', for different learning algorithms. note_rnn_checkpoint_dir: The directory from which the internal NoteRNNLoader will load its checkpointed LSTM. note_rnn_checkpoint_file: A checkpoint file to use in case one cannot be found in the note_rnn_checkpoint_dir. note_rnn_type: If \'default\', will use the basic LSTM described in the research paper. If \'basic_rnn\', will assume the checkpoint is from a Magenta basic_rnn model. note_rnn_hparams: A HParams object which defines the hyperparameters used to train the MelodyRNN model that will be loaded from a checkpoint. num_notes_in_melody: The length of a composition produced by the model. input_size: The size of the one-hot vector encoding a note that is input to the model. num_actions: The size of the one-hot vector encoding a note that is output by the model. midi_primer: A midi file that can be used to prime the model if priming_mode is set to \'single_midi\'. save_name: Name the model will use to save checkpoints. output_every_nth: How many training steps before the model will print an output saying the cumulative reward, and save a checkpoint. training_file_list: A list of paths to tfrecord files containing melody training data. This is necessary to use the \'random_midi\' priming mode. summary_writer: A tf.summary.FileWriter used to log metrics. initialize_immediately: If True, the class will instantiate its component MelodyRNN networks and build the graph in the constructor.'
def __init__(self, output_dir, dqn_hparams=None, reward_mode='music_theory_all', reward_scaler=1.0, exploration_mode='egreedy', priming_mode='random_note', stochastic_observations=False, algorithm='q', note_rnn_checkpoint_dir=None, note_rnn_checkpoint_file=None, note_rnn_type='default', note_rnn_hparams=None, num_notes_in_melody=32, input_size=rl_tuner_ops.NUM_CLASSES, num_actions=rl_tuner_ops.NUM_CLASSES, midi_primer=None, save_name='rl_tuner.ckpt', output_every_nth=1000, training_file_list=None, summary_writer=None, initialize_immediately=True):
self.graph = tf.Graph() with self.graph.as_default(): self.input_size = input_size self.num_actions = num_actions self.output_every_nth = output_every_nth self.output_dir = output_dir self.save_path = os.path.join(output_dir, save_name) self.reward_scaler = reward_scaler self.reward_mode = reward_mode self.exploration_mode = exploration_mode self.num_notes_in_melody = num_notes_in_melody self.stochastic_observations = stochastic_observations self.algorithm = algorithm self.priming_mode = priming_mode self.midi_primer = midi_primer self.training_file_list = training_file_list self.note_rnn_checkpoint_dir = note_rnn_checkpoint_dir self.note_rnn_checkpoint_file = note_rnn_checkpoint_file self.note_rnn_hparams = note_rnn_hparams self.note_rnn_type = note_rnn_type if ((priming_mode == 'single_midi') and (midi_primer is None)): tf.logging.fatal('A midi primer file is required when using the single_midi priming mode.') if (not note_rnn_checkpoint_dir): print('Retrieving checkpoint of Note RNN from Magenta download server.') urllib.urlretrieve('http://download.magenta.tensorflow.org/models/rl_tuner_note_rnn.ckpt', 'note_rnn.ckpt') self.note_rnn_checkpoint_dir = os.getcwd() self.note_rnn_checkpoint_file = os.path.join(os.getcwd(), 'note_rnn.ckpt') if (self.note_rnn_hparams is None): if (self.note_rnn_type == 'basic_rnn'): self.note_rnn_hparams = rl_tuner_ops.basic_rnn_hparams() else: self.note_rnn_hparams = rl_tuner_ops.default_hparams() if ((self.algorithm == 'g') or (self.algorithm == 'pure_rl')): self.reward_mode = 'music_theory_only' if (dqn_hparams is None): self.dqn_hparams = rl_tuner_ops.default_dqn_hparams() else: self.dqn_hparams = dqn_hparams self.discount_rate = tf.constant(self.dqn_hparams.discount_rate) self.target_network_update_rate = tf.constant(self.dqn_hparams.target_network_update_rate) self.optimizer = tf.train.AdamOptimizer() self.actions_executed_so_far = 0 self.experience = deque(maxlen=self.dqn_hparams.max_experience) self.iteration = 0 self.summary_writer = summary_writer self.num_times_store_called = 0 self.num_times_train_called = 0 self.reward_last_n = 0 self.rewards_batched = [] self.music_theory_reward_last_n = 0 self.music_theory_rewards_batched = [] self.note_rnn_reward_last_n = 0 self.note_rnn_rewards_batched = [] self.eval_avg_reward = [] self.eval_avg_music_theory_reward = [] self.eval_avg_note_rnn_reward = [] self.target_val_list = [] self.beat = 0 self.composition = [] self.composition_direction = 0 self.leapt_from = None self.steps_since_last_leap = 0 if (not exists(self.output_dir)): makedirs(self.output_dir) if initialize_immediately: self.initialize_internal_models_graph_session()
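A hedged construction sketch (the paths are placeholders; if note_rnn_checkpoint_dir is empty, the constructor instead downloads a Note RNN checkpoint as shown above):

rl_tuner = MelodyQNetwork(
    output_dir='/tmp/rl_tuner',
    reward_mode='music_theory_all',
    reward_scaler=0.1,
    exploration_mode='egreedy',
    note_rnn_checkpoint_dir='/path/to/note_rnn_checkpoints')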
'Initializes internal RNN models, builds the graph, and starts the session. Adds the graphs of the internal RNN models to this graph, adds the DQN ops to the graph, and starts a new Saver and session. Keeping this separate from the constructor allows a model inheriting from this class to define its q_network differently. Args: restore_from_checkpoint: If True, the weights for the \'q_network\', \'target_q_network\', and \'reward_rnn\' will be loaded from a checkpoint. If False, these models will be initialized with random weights. Useful for checking what pure RL (with no influence from training data) sounds like.'
def initialize_internal_models_graph_session(self, restore_from_checkpoint=True):
with self.graph.as_default(): tf.logging.info('Initializing q network') self.q_network = note_rnn_loader.NoteRNNLoader(self.graph, 'q_network', self.note_rnn_checkpoint_dir, midi_primer=self.midi_primer, training_file_list=self.training_file_list, checkpoint_file=self.note_rnn_checkpoint_file, hparams=self.note_rnn_hparams, note_rnn_type=self.note_rnn_type) tf.logging.info('Initializing target q network') self.target_q_network = note_rnn_loader.NoteRNNLoader(self.graph, 'target_q_network', self.note_rnn_checkpoint_dir, midi_primer=self.midi_primer, training_file_list=self.training_file_list, checkpoint_file=self.note_rnn_checkpoint_file, hparams=self.note_rnn_hparams, note_rnn_type=self.note_rnn_type) tf.logging.info('Initializing reward network') self.reward_rnn = note_rnn_loader.NoteRNNLoader(self.graph, 'reward_rnn', self.note_rnn_checkpoint_dir, midi_primer=self.midi_primer, training_file_list=self.training_file_list, checkpoint_file=self.note_rnn_checkpoint_file, hparams=self.note_rnn_hparams, note_rnn_type=self.note_rnn_type) tf.logging.info('Q network cell: %s', self.q_network.cell) tf.logging.info('Adding RL graph variables') self.build_graph() self.saver = tf.train.Saver() self.session = tf.Session(graph=self.graph) self.session.run(tf.global_variables_initializer()) if restore_from_checkpoint: self.q_network.initialize_and_restore(self.session) self.target_q_network.initialize_and_restore(self.session) self.reward_rnn.initialize_and_restore(self.session) reward_vars = self.reward_rnn.variables() q_vars = self.q_network.variables() reward1 = self.session.run(reward_vars[0]) q1 = self.session.run(q_vars[0]) if (np.sum(((q1 - reward1) ** 2)) == 0.0): print('\nSuccessfully initialized internal nets from checkpoint!') tf.logging.info('\nSuccessfully initialized internal nets from checkpoint!') else: tf.logging.fatal('Error! The model was not initialized from checkpoint properly') else: self.q_network.initialize_new(self.session) self.target_q_network.initialize_new(self.session) self.reward_rnn.initialize_new(self.session) if (self.priming_mode == 'random_midi'): tf.logging.info('Getting priming melodies') self.get_priming_melodies()
'Runs a batch of training data through the MelodyRNN model. If the priming mode is \'random_midi\', priming the q-network requires a random training melody. Therefore, this function runs a batch of data from the training directory through the internal model, and the resulting internal states of the LSTM are stored in a list. The next note in each training melody is also stored in a corresponding list called \'priming_notes\'. Therefore, to prime the model with a random melody, it is only necessary to select a random index from 0 to batch_size-1 and use the hidden states and note at that index as input to the model.'
def get_priming_melodies(self):
(next_note_softmax, self.priming_states, lengths) = self.q_network.run_training_batch() self.priming_notes = ([0] * len(lengths)) for i in range(len(lengths)): start_i = (i * TRAIN_SEQUENCE_LENGTH) end_i = ((start_i + lengths[i]) - 1) end_softmax = next_note_softmax[end_i, :] self.priming_notes[i] = np.argmax(end_softmax) tf.logging.info('Stored priming notes: %s', self.priming_notes)
'Prime an internal model such as the q_network based on priming mode. Args: model: The internal model that should be primed. Returns: The first observation to feed into the model.'
def prime_internal_model(self, model):
model.state_value = model.get_zero_state() if (self.priming_mode == 'random_midi'): priming_idx = np.random.randint(0, len(self.priming_states)) model.state_value = np.reshape(self.priming_states[priming_idx, :], (1, model.cell.state_size)) priming_note = self.priming_notes[priming_idx] next_obs = np.array(rl_tuner_ops.make_onehot([priming_note], self.num_actions)).flatten() tf.logging.debug('Feeding priming state for midi file %s and corresponding note %s', priming_idx, priming_note) elif (self.priming_mode == 'single_midi'): model.prime_model() next_obs = model.priming_note elif (self.priming_mode == 'random_note'): next_obs = self.get_random_note() else: tf.logging.warn('Error! Invalid priming mode. Priming with random note') next_obs = self.get_random_note() return next_obs
'Sample a note uniformly at random. Returns: A random one-hot encoded note.'
def get_random_note(self):
note_idx = np.random.randint(0, self.num_actions) return np.array(rl_tuner_ops.make_onehot([note_idx], self.num_actions)).flatten()
'Starts the model\'s internal composition over at beat 0, with no notes. Also resets statistics about whether the composition is in the middle of a melodic leap.'
def reset_composition(self):
self.beat = 0 self.composition = [] self.composition_direction = 0 self.leapt_from = None self.steps_since_last_leap = 0
'Builds the reinforcement learning TensorFlow graph.'
def build_graph(self):
tf.logging.info('Adding reward computation portion of the graph') with tf.name_scope('reward_computation'): self.reward_scores = tf.identity(self.reward_rnn(), name='reward_scores') tf.logging.info('Adding taking action portion of graph') with tf.name_scope('taking_action'): self.action_scores = tf.identity(self.q_network(), name='action_scores') tf.summary.histogram('action_scores', self.action_scores) if (self.algorithm == 'g'): self.g_action_scores = (self.action_scores + self.reward_scores) self.action_softmax = tf.nn.softmax(self.g_action_scores, name='action_softmax') self.predicted_actions = tf.one_hot(tf.argmax(self.g_action_scores, dimension=1, name='predicted_actions'), self.num_actions) else: self.action_softmax = tf.nn.softmax(self.action_scores, name='action_softmax') self.predicted_actions = tf.one_hot(tf.argmax(self.action_scores, dimension=1, name='predicted_actions'), self.num_actions) tf.logging.info('Add estimating future rewards portion of graph') with tf.name_scope('estimating_future_rewards'): self.next_action_scores = tf.stop_gradient(self.target_q_network()) tf.summary.histogram('target_action_scores', self.next_action_scores) self.rewards = tf.placeholder(tf.float32, (None,), name='rewards') if (self.algorithm == 'psi'): self.target_vals = tf.reduce_logsumexp(self.next_action_scores, reduction_indices=[1]) elif (self.algorithm == 'g'): self.g_normalizer = tf.reduce_logsumexp(self.reward_scores, reduction_indices=[1]) self.g_normalizer = tf.reshape(self.g_normalizer, [(-1), 1]) self.g_normalizer = tf.tile(self.g_normalizer, [1, self.num_actions]) self.g_action_scores = tf.subtract((self.next_action_scores + self.reward_scores), self.g_normalizer) self.target_vals = tf.reduce_logsumexp(self.g_action_scores, reduction_indices=[1]) else: self.target_vals = tf.reduce_max(self.next_action_scores, reduction_indices=[1]) self.future_rewards = (self.rewards + (self.discount_rate * self.target_vals)) tf.logging.info('Adding q value prediction portion of graph') with tf.name_scope('q_value_prediction'): self.action_mask = tf.placeholder(tf.float32, (None, self.num_actions), name='action_mask') self.masked_action_scores = tf.reduce_sum((self.action_scores * self.action_mask), reduction_indices=[1]) temp_diff = (self.masked_action_scores - self.future_rewards) self.prediction_error = tf.reduce_mean(tf.square(temp_diff)) self.params = tf.trainable_variables() self.gradients = self.optimizer.compute_gradients(self.prediction_error) for (i, (grad, var)) in enumerate(self.gradients): if (grad is not None): self.gradients[i] = (tf.clip_by_norm(grad, 5), var) for (grad, var) in self.gradients: tf.summary.histogram(var.name, var) if (grad is not None): tf.summary.histogram((var.name + '/gradients'), grad) self.train_op = self.optimizer.apply_gradients(self.gradients) tf.logging.info('Adding target network update portion of graph') with tf.name_scope('target_network_update'): self.target_network_update = [] for (v_source, v_target) in zip(self.q_network.variables(), self.target_q_network.variables()): update_op = v_target.assign_sub((self.target_network_update_rate * (v_target - v_source))) self.target_network_update.append(update_op) self.target_network_update = tf.group(*self.target_network_update) tf.summary.scalar('prediction_error', self.prediction_error) self.summarize = tf.summary.merge_all() self.no_op1 = tf.no_op()
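The default branch of the future-reward computation is the standard Q-learning bootstrap target = r + gamma * max_a' Q_target(s', a'); the \'psi\' and \'g\' branches replace the max with a logsumexp over the same scores. A numpy sketch of the default case for a batch of two:

import numpy as np

discount_rate = 0.5
rewards = np.array([1.0, -0.5])
next_action_scores = np.array([[0.2, 1.3, 0.7],    # Q_target(s', a) per batch row
                               [0.0, -0.1, 0.4]])
target_vals = next_action_scores.max(axis=1)            # [1.3, 0.4]
future_rewards = rewards + discount_rate * target_vals  # [1.65, -0.3]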
'Main training function that lets the model act, collects rewards, and trains. Iterates a number of times, getting the model to act each time, saving the experience, and performing backprop. Args: num_steps: The number of training steps to execute. exploration_period: The number of steps over which the probability of exploring (taking a random action) is annealed from 1.0 to the model\'s random_action_probability. enable_random: If False, the model will not be able to act randomly / explore.'
def train(self, num_steps=10000, exploration_period=5000, enable_random=True):
tf.logging.info('Evaluating initial model...') self.evaluate_model() self.actions_executed_so_far = 0 if self.stochastic_observations: tf.logging.info('Using stochastic environment') sample_next_obs = False if ((self.exploration_mode == 'boltzmann') or self.stochastic_observations): sample_next_obs = True self.reset_composition() last_observation = self.prime_internal_models() for i in range(num_steps): state = np.array(self.q_network.state_value).flatten() (action, new_observation, reward_scores) = self.action(last_observation, exploration_period, enable_random=enable_random, sample_next_obs=sample_next_obs) new_state = np.array(self.q_network.state_value).flatten() new_reward_state = np.array(self.reward_rnn.state_value).flatten() reward = self.collect_reward(last_observation, new_observation, reward_scores) self.store(last_observation, state, action, reward, new_observation, new_state, new_reward_state) self.reward_last_n += reward self.composition.append(np.argmax(new_observation)) self.beat += 1 if ((i > 0) and ((i % self.output_every_nth) == 0)): tf.logging.info('Evaluating model...') self.evaluate_model() self.save_model(self.algorithm) if (self.algorithm == 'g'): self.rewards_batched.append((self.music_theory_reward_last_n + self.note_rnn_reward_last_n)) else: self.rewards_batched.append(self.reward_last_n) self.music_theory_rewards_batched.append(self.music_theory_reward_last_n) self.note_rnn_rewards_batched.append(self.note_rnn_reward_last_n) save_step = (len(self.rewards_batched) * self.output_every_nth) self.saver.save(self.session, self.save_path, global_step=save_step) r = self.reward_last_n tf.logging.info('Training iteration %s', i) tf.logging.info('\tReward for last %s steps: %s', self.output_every_nth, r) tf.logging.info('\t\tMusic theory reward: %s', self.music_theory_reward_last_n) tf.logging.info('\t\tNote RNN reward: %s', self.note_rnn_reward_last_n) print('Training iteration', i) print('\tReward for last', self.output_every_nth, 'steps:', r) print('\t\tMusic theory reward:', self.music_theory_reward_last_n) print('\t\tNote RNN reward:', self.note_rnn_reward_last_n) if (self.exploration_mode == 'egreedy'): exploration_p = rl_tuner_ops.linear_annealing(self.actions_executed_so_far, exploration_period, 1.0, self.dqn_hparams.random_action_probability) tf.logging.info('\tExploration probability is %s', exploration_p) self.reward_last_n = 0 self.music_theory_reward_last_n = 0 self.note_rnn_reward_last_n = 0 self.training_step() last_observation = new_observation if ((self.beat % self.num_notes_in_melody) == 0): tf.logging.debug('\nResetting composition!\n') self.reset_composition() last_observation = self.prime_internal_models()
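A hedged training sketch continuing the construction example above; the numbers are illustrative only:

rl_tuner.train(num_steps=100000, exploration_period=50000)
rl_tuner.evaluate_model(num_trials=50)  # fills the eval_avg_* reward lists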
'Given an observation, runs the q_network to choose the current action. Does not backprop. Args: observation: A one-hot encoding of a single observation (note). exploration_period: The total length of the period the network will spend exploring, as set in the train function. enable_random: If False, the network cannot act randomly. sample_next_obs: If True, the next observation will be sampled from the softmax probabilities produced by the model, and passed back along with the action. If False, only the action is passed back. Returns: The action chosen, the next observation, and the reward_scores returned by the reward_rnn. If sample_next_obs is False, the next observation is equal to the action.'
def action(self, observation, exploration_period=0, enable_random=True, sample_next_obs=False):
assert (len(observation.shape) == 1), 'Single observation only' self.actions_executed_so_far += 1 if (self.exploration_mode == 'egreedy'): exploration_p = rl_tuner_ops.linear_annealing(self.actions_executed_so_far, exploration_period, 1.0, self.dqn_hparams.random_action_probability) elif (self.exploration_mode == 'boltzmann'): enable_random = False sample_next_obs = True input_batch = np.reshape(observation, (self.q_network.batch_size, 1, self.input_size)) lengths = np.full(self.q_network.batch_size, 1, dtype=int) (action, action_softmax, self.q_network.state_value, reward_scores, self.reward_rnn.state_value) = self.session.run([self.predicted_actions, self.action_softmax, self.q_network.state_tensor, self.reward_scores, self.reward_rnn.state_tensor], {self.q_network.melody_sequence: input_batch, self.q_network.initial_state: self.q_network.state_value, self.q_network.lengths: lengths, self.reward_rnn.melody_sequence: input_batch, self.reward_rnn.initial_state: self.reward_rnn.state_value, self.reward_rnn.lengths: lengths}) reward_scores = np.reshape(reward_scores, self.num_actions) action_softmax = np.reshape(action_softmax, self.num_actions) action = np.reshape(action, self.num_actions) if (enable_random and (random.random() < exploration_p)): note = self.get_random_note() return (note, note, reward_scores) elif (not sample_next_obs): return (action, action, reward_scores) else: obs_note = rl_tuner_ops.sample_softmax(action_softmax) next_obs = np.array(rl_tuner_ops.make_onehot([obs_note], self.num_actions)).flatten() return (action, next_obs, reward_scores)
'Stores an experience in the model\'s experience replay buffer. One experience consists of an initial observation and internal LSTM state, which led to the execution of an action, the receipt of a reward, and finally a new observation and a new LSTM internal state. Args: observation: A one-hot encoding of an observed note. state: The internal state of the q_network MelodyRNN LSTM model. action: A one-hot encoding of the action taken by the network. reward: Reward received for taking the action. newobservation: The next observation that resulted from the action. Unless stochastic_observations is True, the action and new observation will be the same. newstate: The internal state of the q_network MelodyRNN that is observed after taking the action. new_reward_state: The internal state of the reward_rnn network that is observed after taking the action.'
def store(self, observation, state, action, reward, newobservation, newstate, new_reward_state):
if ((self.num_times_store_called % self.dqn_hparams.store_every_nth) == 0): self.experience.append((observation, state, action, reward, newobservation, newstate, new_reward_state)) self.num_times_store_called += 1
'Backpropagate prediction error from a randomly sampled experience batch. A minibatch of experiences is randomly sampled from the model\'s experience replay buffer and used to update the weights of the q_network and target_q_network.'
def training_step(self):
if ((self.num_times_train_called % self.dqn_hparams.train_every_nth) == 0): if (len(self.experience) < self.dqn_hparams.minibatch_size): return samples = random.sample(range(len(self.experience)), self.dqn_hparams.minibatch_size) samples = [self.experience[i] for i in samples] states = np.empty((len(samples), self.q_network.cell.state_size)) new_states = np.empty((len(samples), self.target_q_network.cell.state_size)) reward_new_states = np.empty((len(samples), self.reward_rnn.cell.state_size)) observations = np.empty((len(samples), self.input_size)) new_observations = np.empty((len(samples), self.input_size)) action_mask = np.zeros((len(samples), self.num_actions)) rewards = np.empty((len(samples),)) lengths = np.full(len(samples), 1, dtype=int) for (i, (o, s, a, r, new_o, new_s, reward_s)) in enumerate(samples): observations[i, :] = o new_observations[i, :] = new_o states[i, :] = s new_states[i, :] = new_s action_mask[i, :] = a rewards[i] = r reward_new_states[i, :] = reward_s observations = np.reshape(observations, (len(samples), 1, self.input_size)) new_observations = np.reshape(new_observations, (len(samples), 1, self.input_size)) calc_summaries = ((self.iteration % 100) == 0) calc_summaries = (calc_summaries and (self.summary_writer is not None)) if (self.algorithm == 'g'): (_, _, target_vals, summary_str) = self.session.run([self.prediction_error, self.train_op, self.target_vals, (self.summarize if calc_summaries else self.no_op1)], {self.reward_rnn.melody_sequence: new_observations, self.reward_rnn.initial_state: reward_new_states, self.reward_rnn.lengths: lengths, self.q_network.melody_sequence: observations, self.q_network.initial_state: states, self.q_network.lengths: lengths, self.target_q_network.melody_sequence: new_observations, self.target_q_network.initial_state: new_states, self.target_q_network.lengths: lengths, self.action_mask: action_mask, self.rewards: rewards}) else: (_, _, target_vals, summary_str) = self.session.run([self.prediction_error, self.train_op, self.target_vals, (self.summarize if calc_summaries else self.no_op1)], {self.q_network.melody_sequence: observations, self.q_network.initial_state: states, self.q_network.lengths: lengths, self.target_q_network.melody_sequence: new_observations, self.target_q_network.initial_state: new_states, self.target_q_network.lengths: lengths, self.action_mask: action_mask, self.rewards: rewards}) total_logs = (self.iteration * self.dqn_hparams.train_every_nth) if ((total_logs % self.output_every_nth) == 0): self.target_val_list.append(np.mean(target_vals)) self.session.run(self.target_network_update) if calc_summaries: self.summary_writer.add_summary(summary_str, self.iteration) self.iteration += 1 self.num_times_train_called += 1
'Used to evaluate the rewards the model receives without exploring. Generates num_trials compositions and computes the note_rnn and music theory rewards. Uses no exploration so rewards directly relate to the model\'s policy. Stores result in internal variables. Args: num_trials: The number of compositions to use for evaluation. sample_next_obs: If True, the next note the model plays will be sampled from its output distribution. If False, the model will deterministically choose the note with maximum value.'
def evaluate_model(self, num_trials=100, sample_next_obs=True):
note_rnn_rewards = ([0] * num_trials) music_theory_rewards = ([0] * num_trials) total_rewards = ([0] * num_trials) for t in range(num_trials): last_observation = self.prime_internal_models() self.reset_composition() for _ in range(self.num_notes_in_melody): (_, new_observation, reward_scores) = self.action(last_observation, 0, enable_random=False, sample_next_obs=sample_next_obs) note_rnn_reward = self.reward_from_reward_rnn_scores(new_observation, reward_scores) music_theory_reward = self.reward_music_theory(new_observation) adjusted_mt_reward = (self.reward_scaler * music_theory_reward) total_reward = (note_rnn_reward + adjusted_mt_reward) note_rnn_rewards[t] = note_rnn_reward music_theory_rewards[t] = (music_theory_reward * self.reward_scaler) total_rewards[t] = total_reward self.composition.append(np.argmax(new_observation)) self.beat += 1 last_observation = new_observation self.eval_avg_reward.append(np.mean(total_rewards)) self.eval_avg_note_rnn_reward.append(np.mean(note_rnn_rewards)) self.eval_avg_music_theory_reward.append(np.mean(music_theory_rewards))
'Calls whatever reward function is indicated in the reward_mode field. New reward functions can be written and called from here. Note that the reward functions can make use of the musical composition that has been played so far, which is stored in self.composition. Some reward functions are made up of many smaller functions, such as those related to music theory. Args: obs: A one-hot encoding of the observed note. action: A one-hot encoding of the chosen action. reward_scores: The value for each note output by the reward_rnn. Returns: Float reward value.'
def collect_reward(self, obs, action, reward_scores):
note_rnn_reward = self.reward_from_reward_rnn_scores(action, reward_scores) self.note_rnn_reward_last_n += note_rnn_reward if (self.reward_mode == 'scale'): reward = self.reward_scale(obs, action) elif (self.reward_mode == 'key'): reward = self.reward_key_distribute_prob(action) elif (self.reward_mode == 'key_and_tonic'): reward = self.reward_key(action) reward += self.reward_tonic(action) elif (self.reward_mode == 'non_repeating'): reward = self.reward_non_repeating(action) elif (self.reward_mode == 'music_theory_random'): reward = self.reward_key(action) reward += self.reward_tonic(action) reward += self.reward_penalize_repeating(action) elif (self.reward_mode == 'music_theory_basic'): reward = self.reward_key(action) reward += self.reward_tonic(action) reward += self.reward_penalize_repeating(action) return ((reward * self.reward_scaler) + note_rnn_reward) elif (self.reward_mode == 'music_theory_basic_plus_variety'): reward = self.reward_key(action) reward += self.reward_tonic(action) reward += self.reward_penalize_repeating(action) reward += self.reward_penalize_autocorrelation(action) return ((reward * self.reward_scaler) + note_rnn_reward) elif (self.reward_mode == 'preferred_intervals'): reward = self.reward_preferred_intervals(action) elif (self.reward_mode == 'music_theory_all'): tf.logging.debug('Note RNN reward: %s', note_rnn_reward) reward = self.reward_music_theory(action) tf.logging.debug('Total music theory reward: %s', (self.reward_scaler * reward)) tf.logging.debug('Total note rnn reward: %s', note_rnn_reward) self.music_theory_reward_last_n += (reward * self.reward_scaler) return ((reward * self.reward_scaler) + note_rnn_reward) elif (self.reward_mode == 'music_theory_only'): reward = self.reward_music_theory(action) else: tf.logging.fatal('ERROR! Not a valid reward mode. Cannot compute reward') self.music_theory_reward_last_n += (reward * self.reward_scaler) return (reward * self.reward_scaler)
'Rewards based on probabilities learned from data by trained RNN. Computes the reward_network\'s learned softmax probabilities. When used as rewards, allows the model to maintain information it learned from data. Args: action: A one-hot encoding of the chosen action. reward_scores: The value for each note output by the reward_rnn. Returns: Float reward value.'
def reward_from_reward_rnn_scores(self, action, reward_scores):
action_note = np.argmax(action) normalization_constant = logsumexp(reward_scores) return (reward_scores[action_note] - normalization_constant)
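reward_scores[action_note] - logsumexp(reward_scores) is exactly the log softmax probability of the chosen note; a small numpy check (logsumexp here is assumed to come from scipy, matching the call in the body):

import numpy as np
from scipy.special import logsumexp

reward_scores = np.array([2.0, 0.5, -1.0])
action_note = 0
log_p = reward_scores[action_note] - logsumexp(reward_scores)
softmax = np.exp(reward_scores) / np.exp(reward_scores).sum()
assert np.isclose(log_p, np.log(softmax[action_note]))  # same quantity, stabler form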
'Get note scores from the reward_rnn to use as a reward based on data. Runs the reward_rnn on an observation and initial state. Useful for maintaining the probabilities of the original LSTM model while training with reinforcement learning. Args: observation: One-hot encoding of the observed note. state: Vector representing the internal state of the target_q_network LSTM. Returns: Action scores produced by reward_rnn.'
def get_reward_rnn_scores(self, observation, state):
state = np.atleast_2d(state) input_batch = np.reshape(observation, (self.reward_rnn.batch_size, 1, self.num_actions)) lengths = np.full(self.reward_rnn.batch_size, 1, dtype=int) (rewards,) = self.session.run(self.reward_scores, {self.reward_rnn.melody_sequence: input_batch, self.reward_rnn.initial_state: state, self.reward_rnn.lengths: lengths}) return rewards
'Computes cumulative reward for all music theory functions. Args: action: A one-hot encoding of the chosen action. Returns: Float reward value.'
def reward_music_theory(self, action):
reward = self.reward_key(action) tf.logging.debug('Key: %s', reward) prev_reward = reward reward += self.reward_tonic(action) if (reward != prev_reward): tf.logging.debug('Tonic: %s', reward) prev_reward = reward reward += self.reward_penalize_repeating(action) if (reward != prev_reward): tf.logging.debug('Penalize repeating: %s', reward) prev_reward = reward reward += self.reward_penalize_autocorrelation(action) if (reward != prev_reward): tf.logging.debug('Penalize autocorr: %s', reward) prev_reward = reward reward += self.reward_motif(action) if (reward != prev_reward): tf.logging.debug('Reward motif: %s', reward) prev_reward = reward reward += self.reward_repeated_motif(action) if (reward != prev_reward): tf.logging.debug('Reward repeated motif: %s', reward) prev_reward = reward reward += self.reward_preferred_intervals(action) if (reward != prev_reward): tf.logging.debug('Reward preferred_intervals: %s', reward) prev_reward = reward reward += self.reward_leap_up_back(action) if (reward != prev_reward): tf.logging.debug('Reward leap up back: %s', reward) prev_reward = reward reward += self.reward_high_low_unique(action) if (reward != prev_reward): tf.logging.debug('Reward high low unique: %s', reward) return reward
'Modifies reward by a small random value s to pull it towards the mean. If reward is above the mean, s is subtracted; if reward is below the mean, s is added. The random value s is either 0 or 0.1. This function is helpful to ensure that the model does not become too certain about playing a particular note. Args: reward: A reward value that has already been computed by another reward function. Returns: Original float reward value modified by s.'
def random_reward_shift_to_mean(self, reward):
s = (np.random.randint(0, 2) * 0.1) if (reward > 0.5): reward -= s else: reward += s return reward
'Reward function that trains the model to play a scale. Gives rewards for increasing notes, notes within the desired scale, and two consecutive notes from the scale. Args: obs: A one-hot encoding of the observed note. action: A one-hot encoding of the chosen action. scale: The scale the model should learn. Defaults to C Major if not provided. Returns: Float reward value.'
def reward_scale(self, obs, action, scale=None):
if (scale is None): scale = rl_tuner_ops.C_MAJOR_SCALE obs = np.argmax(obs) action = np.argmax(action) reward = 0 if (action == 1): reward += 0.1 if ((action > obs) and (action < (obs + 3))): reward += 0.05 if (action in scale): reward += 0.01 if (obs in scale): action_pos = scale.index(action) obs_pos = scale.index(obs) if ((obs_pos == (len(scale) - 1)) and (action_pos == 0)): reward += 0.8 elif (action_pos == (obs_pos + 1)): reward += 0.8 return reward