mizz12 committed
Commit d45f43f · 1 Parent(s): 1767d67

Update the app and HTML files
Files changed:
- app.py +5 -0
- templates/history.html +120 -0
- templates/index.html +100 -76
app.py
CHANGED
@@ -24,6 +24,11 @@ def feedback():
 def talk_detail():
     return render_template('talkDetail.html')
 
+# Conversation-history screen (template: history.html)
+@app.route('/history', methods=['GET', 'POST'])
+def history():
+    return render_template('history.html')
+
 # Audio upload & analysis endpoint
 @app.route('/upload_audio', methods=['POST'])
 def upload_audio():
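The upload_audio handler itself is untouched by this commit, but the front-end added to templates/index.html below defines the JSON contract the page expects from it. The sketch that follows only restates what that client code sends and reads; the field names (audio_data, rate, error, details) come from the page's fetch handler, while the example values and the reading of rate as the speaker's own share in percent are assumptions, not something app.py confirms in this diff.

// Sketch of the client/server contract for POST /upload_audio, inferred from index.html.
const base64Chunk = "UklGRiQAAABXQVZF..."; // example only: reader.result.split(",")[1]
const requestBody = { audio_data: base64Chunk }; // body the page sends as JSON

// Response shapes the page's .then((data) => ...) handler can process:
const okResponse = { rate: 42.5 }; // plotted as [rate, 100 - rate] in the doughnut chart
const errorResponse = {
  error: "message shown to the user via alert()",
  details: "extra information logged via console.error()",
};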
templates/history.html
ADDED
@@ -0,0 +1,120 @@
+<!DOCTYPE html>
+<html lang="ja">
+<head>
+  <meta charset="UTF-8" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+  <title>会話履歴</title>
+  <style>
+    body {
+      margin: 0;
+      padding: 0;
+      font-family: Arial, sans-serif;
+      background-color: #fff;
+      color: #000;
+    }
+    header {
+      padding: 16px;
+      background-color: #f5f5f5;
+      box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+      font-size: 20px;
+      font-weight: bold;
+      text-align: center;
+    }
+    .recording-list {
+      padding: 16px;
+    }
+    .record-item {
+      display: flex;
+      justify-content: space-between;
+      align-items: center;
+      padding: 12px;
+      margin: 8px 0;
+      border-radius: 8px;
+      background-color: #e9e9e9;
+      transition: background-color 0.2s ease;
+      cursor: pointer;
+    }
+    .record-item:hover {
+      background-color: #d3d3d3;
+    }
+    .title {
+      font-size: 18px;
+      font-weight: bold;
+    }
+    .timestamp {
+      font-size: 14px;
+      color: #555;
+    }
+    .record-item-template {
+      display: none;
+    }
+    button {
+      margin: 5px;
+      padding: 10px 20px;
+      border: none;
+      border-radius: 4px; /* unified to 4px */
+      background-color: #007bff;
+      color: #fff;
+      cursor: pointer;
+      position: fixed; /* fixed to the viewport */
+      left: 50%; /* horizontally centered */
+      transform: translateX(-50%); /* shift back by half the width to truly center */
+      bottom: 20px; /* pinned to the bottom of the screen */
+    }
+    .history-button:hover {
+      background-color: #0056b3;
+    }
+    button:hover {
+      background-color: #0056b3;
+    }
+  </style>
+  <script>
+    const recordings = [
+      { title: "Recording 1", time: "01:15:35", date: "2/26/2025" },
+      { title: "Recording 2", time: "00:16:41", date: "2/10/2025" },
+    ];
+
+    function createRecordItem(title, time, date) {
+      const template = document.querySelector(".record-item-template");
+      const item = template.cloneNode(true);
+      item.classList.remove("record-item-template");
+      item.style.display = "flex";
+      item.querySelector(".title").textContent = title;
+      item.querySelector(".timestamp").textContent = `${time} | ${date}`;
+      item.onclick = () => (location.href = "talkDetail");
+      return item;
+    }
+
+    function renderRecordings() {
+      const list = document.querySelector(".recording-list");
+      list.innerHTML = "";
+      recordings.forEach((rec) => {
+        const item = createRecordItem(rec.title, rec.time, rec.date);
+        list.appendChild(item);
+      });
+    }
+
+    window.onload = renderRecordings;
+
+    // Screen navigation
+    function showRecorder() {
+      // Go to the recorder screen
+      window.location.href = "/index";
+    }
+  </script>
+</head>
+<body>
+  <header>All Recordings</header>
+  <div class="recording-list">
+    <div class="record-item record-item-template">
+      <div>
+        <div class="title">Recording Title</div>
+        <div class="timestamp">00:00:00 | 1/1/2025</div>
+      </div>
+    </div>
+  </div>
+  <button class="history-button" id="detailButton" onclick="showRecorder()">
+    録音画面を表示
+  </button>
+</body>
+</html>
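One caveat in the script above: renderRecordings() clears .recording-list with innerHTML = "" before createRecordItem() looks up .record-item-template, and because the hidden template lives inside that list, the lookup then finds nothing and no items are built. A minimal corrected sketch (not part of this commit) that keeps the template available:

// Hypothetical fix: detach the template before wiping the list, then re-attach it
// so createRecordItem() can still clone it on this and later renders.
function renderRecordings() {
  const list = document.querySelector(".recording-list");
  const template = document.querySelector(".record-item-template");
  list.innerHTML = "";
  list.appendChild(template);
  recordings.forEach((rec) => {
    list.appendChild(createRecordItem(rec.title, rec.time, rec.date));
  });
}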
templates/index.html
CHANGED
@@ -1,8 +1,8 @@
 <!DOCTYPE html>
 <html lang="ja">
 <head>
-  <meta charset="UTF-8"
-  <meta name="viewport" content="width=device-width, initial-scale=1.0"
+  <meta charset="UTF-8" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
   <title>Voice Recorder Interface</title>
   <style>
     body {
@@ -85,8 +85,12 @@
   </button>
 
   <div class="result-buttons">
-    <button class="result-button" id="historyButton" onclick="showHistory()"
+    <button class="result-button" id="historyButton" onclick="showHistory()">
+      会話履歴を表示
+    </button>
+    <button class="result-button" id="feedbackButton" onclick="showResults()">
+      フィードバック画面を表示
+    </button>
   </div>
 
   <script>
@@ -94,44 +98,48 @@
     let mediaRecorder;
     let audioChunks = [];
     let recordingInterval;
-    let count_voice=0;
-    let before_rate=0;
+    let count_voice = 0;
+    let before_rate = 0;
     // Initialize Chart.js
-    const ctx = document.getElementById(
+    const ctx = document.getElementById("speechChart").getContext("2d");
     const speechChart = new Chart(ctx, {
-      type:
+      type: "doughnut",
       data: {
-        labels: [
-        datasets: [
+        labels: ["自分", "他の人"],
+        datasets: [
+          {
+            data: [30, 70],
+            backgroundColor: ["#4caf50", "#757575"],
+          },
+        ],
       },
       options: {
         responsive: true,
         plugins: {
           legend: {
             display: true,
-            position:
-            labels: { color:
-          }
-        }
-      }
+            position: "bottom",
+            labels: { color: "white" },
+          },
+        },
+      },
     });
 
     async function toggleRecording() {
-      const recordButton = document.getElementById(
+      const recordButton = document.getElementById("recordButton");
 
       if (!isRecording) {
         // Start recording
         isRecording = true;
-        recordButton.classList.add(
+        recordButton.classList.add("recording");
         try {
-          const stream = await navigator.mediaDevices.getUserMedia({
+          const stream = await navigator.mediaDevices.getUserMedia({
+            audio: true,
+          });
           mediaRecorder = new MediaRecorder(stream);
           audioChunks = [];
 
-          mediaRecorder.ondataavailable = event => {
+          mediaRecorder.ondataavailable = (event) => {
            if (event.data.size > 0) {
              audioChunks.push(event.data);
            }
@@ -147,93 +155,109 @@
           recordingInterval = setInterval(() => {
             if (mediaRecorder && mediaRecorder.state === "recording") {
               mediaRecorder.stop();
             }
           }, 10000);
         } catch (error) {
-          console.error(
+          console.error("マイクへのアクセスに失敗しました:", error);
           isRecording = false;
-          recordButton.classList.remove(
+          recordButton.classList.remove("recording");
         }
       } else {
         // Manual stop
         isRecording = false;
-        recordButton.classList.remove(
+        recordButton.classList.remove("recording");
         clearInterval(recordingInterval);
         if (mediaRecorder && mediaRecorder.state === "recording") {
           mediaRecorder.stop();
-          count_voice=0;
-          before_rate=0;
+          count_voice = 0;
+          before_rate = 0;
         }
       }
     }
 
     function sendAudioChunks(chunks) {
-      const audioBlob = new Blob(chunks, { type:
+      const audioBlob = new Blob(chunks, { type: "audio/wav" });
       const reader = new FileReader();
       reader.onloadend = () => {
-        const base64String = reader.result.split(
-        fetch(
-          method:
+        const base64String = reader.result.split(",")[1]; // Base64-encoded audio data
+        fetch("/upload_audio", {
+          method: "POST",
           headers: {
+            "Content-Type": "application/json",
           },
           body: JSON.stringify({ audio_data: base64String }),
         })
+          .then((response) => response.json())
+          .then((data) => {
+            if (data.error) {
+              alert("エラー: " + data.error);
+              console.error(data.details);
+            } else if (data.rate !== undefined) {
+              // Update the chart when an analysis result comes back
+
+              if (count_voice == 0) {
+                speechChart.data.datasets[0].data = [
+                  data.rate,
+                  100 - data.rate,
+                ];
+                before_rate = data.rate;
+              } else if (count_voice == 1) {
+                let tmp_rate = (data.rate + before_rate) / 2; // only two data points, so take a simple average
+                speechChart.data.datasets[0].data = [
+                  tmp_rate,
+                  100 - tmp_rate,
+                ];
+                console.log(before_rate, tmp_rate, 100 - tmp_rate);
+                before_rate = tmp_rate;
+              } else {
+                let tmp_rate = (data.rate + before_rate * 2) / 3; // weight the previous value double and average over three
+                speechChart.data.datasets[0].data = [
+                  tmp_rate,
+                  100 - tmp_rate,
+                ];
+                console.log(before_rate, tmp_rate, 100 - tmp_rate);
+                before_rate = tmp_rate; // keep this rate for the next update
+              }
+              count_voice++;
+
+              speechChart.update();
+              //lert('音声の解析が完了しました。自分の音声: ' + data.rate.toFixed(2) + '%, 他の人: ' + (100 - data.rate).toFixed(2) + '%');
+            } else {
+              alert("音声がバックエンドに送信されました。");
            }
+            // If recording is still in progress, start recording again (continuous auto-recording)
+            if (
+              isRecording &&
+              mediaRecorder &&
+              mediaRecorder.state === "inactive"
+            ) {
+              mediaRecorder.start();
+            }
+          })
+          .catch((error) => {
+            console.error("エラー:", error);
+            if (
+              isRecording &&
+              mediaRecorder &&
+              mediaRecorder.state === "inactive"
+            ) {
+              mediaRecorder.start();
+            }
+          });
      };
      reader.readAsDataURL(audioBlob);
    }
 
    function showHistory() {
      // If there is a conversation-history screen, navigate to that page
-      alert('会話履歴を表示する機能は未実装です。');
+      window.location.href = "history";
+      //alert('会話履歴を表示する機能は未実装です。');
    }
 
    function showResults() {
      // Go to the feedback screen
-      window.location.href =
+      window.location.href = "feedback";
    }
  </script>
 </body>
-</html>
+</html>
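The chart-update branch in this diff applies a simple weighted smoothing to the per-chunk speaking rate: the first chunk is shown as-is, the second is averaged with the first, and every later chunk blends one part new value with two parts previous value. The following standalone function is a sketch that mirrors that committed logic (same variable names, extracted for clarity), not a replacement for it:

// Mirrors the smoothing inside the fetch().then() handler above.
// rate: newly returned value; before_rate: previously displayed value;
// count_voice: number of chunks processed so far.
function smoothRate(rate, before_rate, count_voice) {
  if (count_voice === 0) return rate; // first chunk: use it directly
  if (count_voice === 1) return (rate + before_rate) / 2; // two samples: plain average
  return (rate + before_rate * 2) / 3; // afterwards: previous value weighted double
}

// Example: chunk rates 60, 40, 50 display as 60, then (40 + 60) / 2 = 50, then (50 + 2 * 50) / 3 = 50.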