balthou committed
Commit 6c6e316 · 1 Parent(s): 77cfd66

add time tracker

Files changed (1): app.py +36 -14
app.py CHANGED
@@ -10,6 +10,11 @@ import numpy as np
 
 def select_note(note="C4", context=None):
     context["note"] = note
+    time_index = context.get("time_index", None)
+    if time_index is not None:
+        context["time_index"] += 1
+    else:
+        context["time_index"] = 0
 
 
 def create_note(context={}):
@@ -56,12 +61,6 @@ def get_color(note, size=(256, 256)):
     index = list(NOTE_FREQUENCIES.keys()).index(note)
     color = colors.get(list(colors.keys())[index], [0., 0., 0.])
     img = np.ones((size[1], size[0], 3)) * np.array(color)[None, None, :]
-    border_size = 4
-    border_color = (0.2, 0.2, 0.2)
-    img[:border_size, :] = border_color
-    img[-border_size:, :] = border_color
-    img[:, :border_size] = border_color
-    img[:, -border_size:] = border_color
     text = NOTES_TRANSLATION[index].upper()
     font_scale = size[0] // 64
     thickness = 2
@@ -78,9 +77,18 @@ def get_color(note, size=(256, 256)):
     return img
 
 
+def add_border(img, border_size=10, color=(0.2, 0.2, 0.2)):
+    border_size = 4
+    img[:border_size, :] = color
+    img[-border_size:, :] = color
+    img[:, :border_size] = color
+    img[:, -border_size:] = color
+
+
 def xylo_player():
-    full_song = song_player()
     select_note()
+    select_song()
+    full_song = song_player()
     audio = create_note()
     play_note(audio)
     out_image = display_color()
@@ -91,28 +99,44 @@ SONG_1 = "au clair de la lune"
 SONG_2 = "ainsi font, font, font"
 
 
-def song_player(song: str=SONG_1, context={}):
+def select_song(song: str = SONG_1, context={}):
+    previous_song = context.get("song", None)
+    if previous_song != song:
+        # reset time index
+        context["time_index"] = 0
+    context["song"] = song
+
+
+def song_player(context={}):
+    song = context["song"]
     SONG = {
         SONG_1: "fa fa fa sol la - sol - la sol sol fa - fa fa fa sol la",
         SONG_2: "mi mi do - mi - sol - sol - la sol fa mi re mi do mi mi do - mi - sol - sol - la sol fa mi re do"
     }
     song_str = SONG.get(song, "")
-    return generate_song(song_str)
+    return generate_song(song_str, current_time=context.get("time_index", 0))
 
 
-def generate_song(song_str):
+def generate_song(song_str, current_time=None):
     notes = song_str.split(" ")
     all_notes = []
     size = (64, 128)
-    for note in notes:
+    index_no_silence = -1
+    for idx, note in enumerate(notes):
         if note in ["-", "."]:
             img_note = np.zeros((size[1], size[0], 3))
+            color = (0.2, 0.2, 0.2)
         else:
             note_classic = NOTES_CORRESPONDANCE.get(note, None)
+            index_no_silence += 1
             if note_classic is None:
                 print(f"Note {note} not found")
                 continue
+
             img_note = get_color(note_classic, size=size)
+            color = (0.8, 0., 0.) if current_time == index_no_silence else (
+                0.2, 0.2, 0.2)
+        add_border(img_note, color=color)
         all_notes.append(img_note)
 
     # out_image = np.hstack(all_notes)
@@ -120,8 +144,6 @@ def generate_song(song_str):
     remainder = max_notes_per_line - len(all_notes) % max_notes_per_line
     for _ in range(remainder):
         all_notes.append(np.zeros_like(all_notes[0]))
-    # out_image = np.hstack(all_notes)
-    # print(len(all_notes))
     note_lines = [all_notes[i:i + max_notes_per_line]
                   for i in range(0, len(all_notes), max_notes_per_line)]
     out_image = np.vstack([np.hstack(line) for line in note_lines])
@@ -139,7 +161,7 @@ if __name__ == '__main__':
         img = get_color(note, size=(512, 512))
         Image.save_image(img, icon)
     interactive(note=Control("C4", all_notes, icons=icon_list))(select_note)
-    interactive(song=(SONG_1, [SONG_1, SONG_2]))(song_player)
+    interactive(song=(SONG_1, [SONG_1, SONG_2]))(select_song)
     interactive_pipeline(
         gui=args.backend,
         cache=False,
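
The heart of this change is the time tracker: a counter kept in the shared context dict, which select_note seeds at 0 on the first call and advances by one on every later call. A minimal standalone sketch of that behavior, with a plain dict standing in for the pipeline context (an assumption about how interactive_pipeline shares state between steps):

def tick(context):
    # Same logic as the new lines in select_note: seed at 0, then increment.
    time_index = context.get("time_index", None)
    if time_index is not None:
        context["time_index"] += 1
    else:
        context["time_index"] = 0
    return context["time_index"]

context = {}
print(tick(context))  # 0 -- first call seeds the counter
print(tick(context))  # 1 -- each rerun of the pipeline advances it

The branch could also be collapsed to context["time_index"] = context.get("time_index", -1) + 1.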
 
10
 
11
  def select_note(note="C4", context=None):
12
  context["note"] = note
13
+ time_index = context.get("time_index", None)
14
+ if time_index is not None:
15
+ context["time_index"] += 1
16
+ else:
17
+ context["time_index"] = 0
18
 
19
 
20
  def create_note(context={}):
 
61
  index = list(NOTE_FREQUENCIES.keys()).index(note)
62
  color = colors.get(list(colors.keys())[index], [0., 0., 0.])
63
  img = np.ones((size[1], size[0], 3)) * np.array(color)[None, None, :]
 
 
 
 
 
 
64
  text = NOTES_TRANSLATION[index].upper()
65
  font_scale = size[0] // 64
66
  thickness = 2
 
77
  return img
78
 
79
 
80
+ def add_border(img, border_size=10, color=(0.2, 0.2, 0.2)):
81
+ border_size = 4
82
+ img[:border_size, :] = color
83
+ img[-border_size:, :] = color
84
+ img[:, :border_size] = color
85
+ img[:, -border_size:] = color
86
+
87
+
88
  def xylo_player():
 
89
  select_note()
90
+ select_song()
91
+ full_song = song_player()
92
  audio = create_note()
93
  play_note(audio)
94
  out_image = display_color()
 
99
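One thing to flag in the new add_border helper: the body immediately rebinds border_size = 4, so the border_size=10 default, and any value a caller passes, is silently ignored. A sketch of a version that honors the parameter, assuming the float images used elsewhere in the file:

import numpy as np

def add_border(img, border_size=10, color=(0.2, 0.2, 0.2)):
    # Paint a frame border_size pixels thick on all four edges, in place.
    img[:border_size, :] = color
    img[-border_size:, :] = color
    img[:, :border_size] = color
    img[:, -border_size:] = color

tile = np.ones((128, 64, 3))
add_border(tile, border_size=4, color=(0.8, 0., 0.))  # red frame, 4 px thick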
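select_song only resets the tracker when the selection actually changes, so rerunning the pipeline with the same song keeps stepping through it, while switching songs restarts from the first note. A self-contained sketch of that reset rule, same names as the commit, plain dict as context:

SONG_1 = "au clair de la lune"
SONG_2 = "ainsi font, font, font"

def select_song(song=SONG_1, context={}):
    # Reset the time index only when the selection changes.
    if context.get("song", None) != song:
        context["time_index"] = 0
    context["song"] = song

ctx = {}
select_song(SONG_1, context=ctx)   # first call: time_index -> 0
ctx["time_index"] = 5              # pretend five notes have played
select_song(SONG_1, context=ctx)   # same song: counter untouched
assert ctx["time_index"] == 5
select_song(SONG_2, context=ctx)   # new song: counter reset
assert ctx["time_index"] == 0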
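In generate_song, the separate index_no_silence counter means silences ("-" or ".") never consume a beat: current_time is compared against the running count of playable notes only, and the matching tile gets the red (0.8, 0., 0.) border while every other tile keeps the grey one. A reduced sketch of just that indexing, with the image work stubbed out:

def highlight_flags(notes, current_time):
    # Mirrors the loop in generate_song, minus the rendering:
    # True marks the tile that would get the red border.
    flags = []
    index_no_silence = -1
    for note in notes:
        if note in ["-", "."]:
            flags.append(False)        # silences are never highlighted
        else:
            index_no_silence += 1
            flags.append(current_time == index_no_silence)
    return flags

print(highlight_flags("fa fa - sol".split(" "), 2))
# [False, False, False, True] -- "sol" is the third playable note

Note there is no wraparound: once time_index grows past the last playable note, no tile is highlighted until select_song resets the counter.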
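A small edge case in the line padding kept by this hunk: when len(all_notes) is already a multiple of max_notes_per_line, remainder evaluates to max_notes_per_line and a full row of blank tiles gets appended. A negative modulo sidesteps that; a sketch of the hypothetical fix:

import numpy as np

all_notes = [np.zeros((64, 128, 3))] * 8
max_notes_per_line = 4

# -x % n is 0 when x is already a multiple of n, unlike n - x % n.
remainder = -len(all_notes) % max_notes_per_line
for _ in range(remainder):
    all_notes.append(np.zeros_like(all_notes[0]))
print(len(all_notes))  # 8 -- no spurious blank row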
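Finally, the song dropdown (initial value SONG_1, choices [SONG_1, SONG_2]) now drives select_song instead of song_player, so the user's choice flows into the context first and song_player reads it back out on the next pipeline step; this reading assumes interactive(...) binds the keyword to a GUI control, as the note control on the line above suggests.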