balthou committed
Commit 24ed670 · 1 Parent(s): 2cf3d27

update with next note

Files changed (2)
  1. app.py +54 -31
  2. requirements.txt +1 -1
app.py CHANGED
@@ -2,19 +2,27 @@ from interactive_pipe import interactive_pipeline, interactive, Control, Image
 from synthetizer import NOTE_FREQUENCIES, get_note
 from interactive_pipe.data_objects.audio import Audio
 from pathlib import Path
+from typing import Tuple
 from time import sleep
 import argparse
 import cv2
 import numpy as np
 
 
-def select_note(note="C4", context=None):
-    context["note"] = note
-    time_index = context.get("time_index", None)
-    if time_index is not None:
-        context["time_index"] += 1
-    else:
+SONG_1 = "au clair de la lune"
+SONG_2 = "ainsi font, font, font"
+
+
+def select_song(song: str = SONG_1, context={}):
+    previous_song = context.get("song", None)
+    if previous_song != song:
+        # reset time index
         context["time_index"] = 0
+    context["song"] = song
+
+
+def select_note(note="C4", context={}):
+    context["note"] = note
 
 
 def create_note(context={}):
@@ -32,15 +40,27 @@ def play_note(audio_signal: np.ndarray, context={}):
         sleep(0.01)
         print("waiting for file")
     assert file_name.exists()
-    context["__set_audio"](file_name)
-    context["__play"]()
+    if context["time_index"] == 0:
+        context["__stop"]()
+    else:
+        context["__set_audio"](file_name)
+        context["__play"]()
 
 
-def display_color(context={}):
+def display_current_color(context={}):
+    if context["time_index"] == 0:
+        return np.zeros((256, 256, 3))
     note = context.get("note", "C4")
     return get_color(note, size=(256, 256))
 
 
+def display_next_color(context={}):
+    target_note = context.get("target_note", None)
+    if target_note is None:
+        return np.zeros((256, 256, 3))
+    return get_color(target_note, size=(256, 256))
+
+
 NOTES_TRANSLATION = ["do", "re", "mi", "fa", "sol", "la", "si", "do2"]
 NOTES_CORRESPONDANCE = {
     NOTES_TRANSLATION[i]: note for i, note in enumerate(list(NOTE_FREQUENCIES.keys()))}
@@ -85,26 +105,24 @@ def add_border(img, border_size=10, color=(0.2, 0.2, 0.2)):
     img[:, -border_size:] = color
 
 
+def increment_time(context: dict = {}) -> None:
+    time_index = context.get("time_index", None)
+    if time_index is not None:
+        context["time_index"] += 1
+    else:
+        context["time_index"] = 0
+
+
 def xylo_player():
-    select_note()
     select_song()
+    select_note()
     full_song = song_player()
     audio = create_note()
     play_note(audio)
-    out_image = display_color()
-    return [full_song, out_image]
-
-
-SONG_1 = "au clair de la lune"
-SONG_2 = "ainsi font, font, font"
-
-
-def select_song(song: str = SONG_1, context={}):
-    previous_song = context.get("song", None)
-    if previous_song != song:
-        # reset time index
-        context["time_index"] = 0
-    context["song"] = song
+    # current_note = display_current_color()
+    target_note = display_next_color()
+    increment_time()
+    return [full_song, target_note]
 
 
 def song_player(context={}):
@@ -114,14 +132,18 @@ def song_player(context={}):
         SONG_2: "mi mi do - mi - sol - sol - la sol fa mi re mi do mi mi do - mi - sol - sol - la sol fa mi re do"
     }
     song_str = SONG.get(song, "")
-    return generate_song(song_str, current_time=context.get("time_index", 0))
+    image_song, target_note = generate_song(
+        song_str, current_time=context.get("time_index", 0))
+    context["target_note"] = target_note
+    return image_song
 
 
-def generate_song(song_str, current_time=None):
+def generate_song(song_str, current_time=None) -> Tuple[np.ndarray, str]:
     notes = song_str.split(" ")
     all_notes = []
     size = (64, 128)
     index_no_silence = -1
+    target_note = None
     for idx, note in enumerate(notes):
         if note in ["-", "."]:
             img_note = np.zeros((size[1], size[0], 3))
@@ -134,12 +156,13 @@ def generate_song(song_str, current_time=None):
             continue
 
         img_note = get_color(note_classic, size=size)
-        color = (0.8, 0., 0.) if current_time == index_no_silence else (
-            0.2, 0.2, 0.2)
+        if current_time == index_no_silence:
+            target_note = note_classic
+            color = (0.8, 0., 0.)
+        else:
+            color = (0.2, 0.2, 0.2)
         add_border(img_note, color=color)
         all_notes.append(img_note)
-
-        # out_image = np.hstack(all_notes)
     max_notes_per_line = 12
     remainder = max_notes_per_line - len(all_notes) % max_notes_per_line
     for _ in range(remainder):
@@ -147,7 +170,7 @@
     note_lines = [all_notes[i:i + max_notes_per_line]
                   for i in range(0, len(all_notes), max_notes_per_line)]
     out_image = np.vstack([np.hstack(line) for line in note_lines])
-    return out_image
+    return out_image, target_note
 
 
 if __name__ == '__main__':
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-interactive-pipe>=0.8.1
+interactive-pipe>=0.8.2
 wavio
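
The app.py changes above thread the playback state through the shared context dict: select_song resets time_index when the song changes, song_player stashes the red-framed note in context["target_note"], and increment_time advances the cursor once per run. Below is a minimal sketch of that flow (not part of the commit), calling the helpers directly outside the interactive_pipe GUI. It assumes app.py and its dependencies (interactive-pipe, synthetizer) are importable, and that song_player reads the current song from the context, as xylo_player suggests; create_note and play_note are left out because they rely on GUI-provided audio callbacks (__set_audio, __play, __stop).

    from app import SONG_1, select_song, select_note, song_player, increment_time

    context = {}                          # interactive_pipe normally supplies this
    select_song(SONG_1, context=context)  # song changed -> time_index reset to 0
    select_note("C4", context=context)    # remember the note being played
    score = song_player(context=context)  # render the score image; the highlighted
                                          # note lands in context["target_note"]
    print(context["target_note"])         # the next note the player should hit
    increment_time(context=context)       # advance time_index for the next run

Returning [full_song, target_note] from xylo_player then lets the GUI show the full score next to the upcoming note, while the new time_index == 0 branch in play_note stops the audio instead of replaying the last file.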