stereoDrift committed
Commit a715dd9 · verified · 1 Parent(s): 58cfde3

Upload 9 files

Files changed (10):
  1. .gitattributes +1 -0
  2. README.md +78 -12
  3. SpeechManager.js +361 -0
  4. assets/Stan.gltf +0 -0
  5. assets/siteOGImage.jpg +3 -0
  6. audioManager.js +85 -0
  7. game.js +0 -0
  8. index.html +56 -18
  9. main.js +12 -0
  10. styles.css +44 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/siteOGImage.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,78 @@
- ---
- title: 3d Model Playground
- emoji: 🏢
- colorFrom: red
- colorTo: purple
- sdk: static
- pinned: false
- license: mit
- short_description: control 3D models with voice + hand gestures
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # 3D Model Playground
+
+ Control 3D models using hand gestures and voice commands in real time.
+
+ An interactive web app built with Three.js, MediaPipe computer vision, the Web Speech API, and Rosebud AI.
+
+ - Say "drag", "rotate", "scale", or "animate" to change the interaction mode
+ - Pinch your fingers to control the 3D model
+ - Drag and drop a new 3D model onto the page to import it (GLB/GLTF format)
+
+ [Video](https://youtu.be/_I1E44Fp1Es?si=lR2otqR_-ZGdIGXT) | [Live Demo](https://collidingscopes.github.io/3d-model-playground/)
+
+ ## Requirements
+
+ - Modern web browser with WebGL support
+ - Camera and microphone access
+
+ ## Technologies
+
+ - **Three.js** for 3D rendering
+ - **MediaPipe** for hand tracking and gesture recognition
+ - **Web Speech API** for speech recognition
+ - **HTML5 Canvas** for visual feedback
+ - **JavaScript** for real-time interaction
+
+ ## Setup for Development
+
+ ```bash
+ # Clone this repository
+ git clone https://github.com/collidingScopes/3d-model-playground
+
+ # Navigate to the project directory
+ cd 3d-model-playground
+
+ # Serve with your preferred method (example using Python)
+ python -m http.server
+ ```
+
+ Then navigate to `http://localhost:8000` in your browser.
+
+ ## License
+
+ MIT License
+
+ ## Credits
+
+ - Three.js - https://threejs.org/
+ - MediaPipe - https://mediapipe.dev/
+ - Rosebud AI - https://rosebud.ai/
+ - Quaternius 3D models - https://quaternius.com/
+
+ ## Related Projects
+
+ I've released several computer vision projects (with code and tutorials) here:
+ [Fun With Computer Vision](https://www.funwithcomputervision.com/)
+
+ You can purchase lifetime access and receive the full project files and tutorials. I'm adding more content regularly :)
+
+ You might also like some of my other open source projects:
+
+ - [Particular Drift](https://collidingScopes.github.io/particular-drift) - Turn photos into flowing particle animations
+ - [Liquid Logo](https://collidingScopes.github.io/liquid-logo) - Transform logos and icons into liquid metal animations
+ - [Video-to-ASCII](https://collidingScopes.github.io/ascii) - Convert videos into ASCII pixel art
+
+ ## Contact
+
+ - Instagram: [@stereo.drift](https://www.instagram.com/stereo.drift/)
+ - Twitter/X: [@measure_plan](https://x.com/measure_plan)
+ - Email: [[email protected]](mailto:[email protected])
+ - GitHub: [collidingScopes](https://github.com/collidingScopes)
+
+ ## Donations
+
+ If you enjoyed this, feel free to buy me a coffee.
+
+ My name is Alan, and I enjoy building open source software for computer vision, games, and more. Your support would be much appreciated during late-night coding sessions!
+
+ [![Buy Me A Coffee](https://www.buymeacoffee.com/assets/img/custom_images/yellow_img.png)](https://www.buymeacoffee.com/stereoDrift)
SpeechManager.js ADDED
@@ -0,0 +1,361 @@
+ function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) {
+     try {
+         var info = gen[key](arg);
+         var value = info.value;
+     } catch (error) {
+         reject(error);
+         return;
+     }
+     if (info.done) {
+         resolve(value);
+     } else {
+         Promise.resolve(value).then(_next, _throw);
+     }
+ }
+ function _async_to_generator(fn) {
+     return function() {
+         var self = this, args = arguments;
+         return new Promise(function(resolve, reject) {
+             var gen = fn.apply(self, args);
+             function _next(value) {
+                 asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value);
+             }
+             function _throw(err) {
+                 asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err);
+             }
+             _next(undefined);
+         });
+     };
+ }
+ function _class_call_check(instance, Constructor) {
+     if (!(instance instanceof Constructor)) {
+         throw new TypeError("Cannot call a class as a function");
+     }
+ }
+ function _defineProperties(target, props) {
+     for(var i = 0; i < props.length; i++){
+         var descriptor = props[i];
+         descriptor.enumerable = descriptor.enumerable || false;
+         descriptor.configurable = true;
+         if ("value" in descriptor) descriptor.writable = true;
+         Object.defineProperty(target, descriptor.key, descriptor);
+     }
+ }
+ function _create_class(Constructor, protoProps, staticProps) {
+     if (protoProps) _defineProperties(Constructor.prototype, protoProps);
+     if (staticProps) _defineProperties(Constructor, staticProps);
+     return Constructor;
+ }
+ function _ts_generator(thisArg, body) {
+     var f, y, t, g, _ = {
+         label: 0,
+         sent: function() {
+             if (t[0] & 1) throw t[1];
+             return t[1];
+         },
+         trys: [],
+         ops: []
+     };
+     return g = {
+         next: verb(0),
+         "throw": verb(1),
+         "return": verb(2)
+     }, typeof Symbol === "function" && (g[Symbol.iterator] = function() {
+         return this;
+     }), g;
+     function verb(n) {
+         return function(v) {
+             return step([
+                 n,
+                 v
+             ]);
+         };
+     }
+     function step(op) {
+         if (f) throw new TypeError("Generator is already executing.");
+         while(_)try {
+             if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
+             if (y = 0, t) op = [
+                 op[0] & 2,
+                 t.value
+             ];
+             switch(op[0]){
+                 case 0:
+                 case 1:
+                     t = op;
+                     break;
+                 case 4:
+                     _.label++;
+                     return {
+                         value: op[1],
+                         done: false
+                     };
+                 case 5:
+                     _.label++;
+                     y = op[1];
+                     op = [
+                         0
+                     ];
+                     continue;
+                 case 7:
+                     op = _.ops.pop();
+                     _.trys.pop();
+                     continue;
+                 default:
+                     if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
+                         _ = 0;
+                         continue;
+                     }
+                     if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
+                         _.label = op[1];
+                         break;
+                     }
+                     if (op[0] === 6 && _.label < t[1]) {
+                         _.label = t[1];
+                         t = op;
+                         break;
+                     }
+                     if (t && _.label < t[2]) {
+                         _.label = t[2];
+                         _.ops.push(op);
+                         break;
+                     }
+                     if (t[2]) _.ops.pop();
+                     _.trys.pop();
+                     continue;
+             }
+             op = body.call(thisArg, _);
+         } catch (e) {
+             op = [
+                 6,
+                 e
+             ];
+             y = 0;
+         } finally{
+             f = t = 0;
+         }
+         if (op[0] & 5) throw op[1];
+         return {
+             value: op[0] ? op[1] : void 0,
+             done: true
+         };
+     }
+ }
+ export var SpeechManager = /*#__PURE__*/ function() {
+     "use strict";
+     function SpeechManager(onTranscript, onRecognitionActive, onCommandRecognized) {
+         var _this = this;
+         _class_call_check(this, SpeechManager);
+         this.onTranscript = onTranscript;
+         this.onRecognitionActive = onRecognitionActive; // Callback for recognition state
+         this.onCommandRecognized = onCommandRecognized; // Callback for recognized commands
+         this.recognition = null;
+         this.isRecognizing = false;
+         this.finalTranscript = '';
+         this.interimTranscript = '';
+         var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+         if (SpeechRecognition) {
+             this.recognition = new SpeechRecognition();
+             this.recognition.continuous = true; // Keep listening even after a pause
+             this.recognition.interimResults = true; // Get results while speaking
+             this.recognition.onstart = function() {
+                 _this.isRecognizing = true;
+                 console.log('Speech recognition started.');
+                 if (_this.onRecognitionActive) _this.onRecognitionActive(true);
+             };
+             this.recognition.onresult = function(event) {
+                 _this.interimTranscript = '';
+                 for(var i = event.resultIndex; i < event.results.length; ++i){
+                     if (event.results[i].isFinal) {
+                         // Append to finalTranscript and then clear it for the next utterance
+                         // This way, `finalTranscript` holds the *current complete* utterance.
+                         var currentFinalTranscript = event.results[i][0].transcript.trim().toLowerCase();
+                         _this.finalTranscript += currentFinalTranscript; // Append to potentially longer session transcript if needed, though we process per utterance
+                         if (_this.onTranscript) {
+                             // Display the raw transcript before processing as command
+                             _this.onTranscript(event.results[i][0].transcript, ''); // Send final, clear interim
+                         }
+                         // Check for commands
+                         var commandMap = {
+                             'drag': 'drag',
+                             'rotate': 'rotate',
+                             'rotation': 'rotate',
+                             'scale': 'scale',
+                             'size': 'scale',
+                             'zoom': 'scale',
+                             'animate': 'animate',
+                             'anime': 'animate',
+                             'animation': 'animate' // Alias for animate
+                         };
+                         var spokenCommands = Object.keys(commandMap);
+                         var _iteratorNormalCompletion = true, _didIteratorError = false, _iteratorError = undefined;
+                         try {
+                             for(var _iterator = spokenCommands[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true){
+                                 var spokenCmd = _step.value;
+                                 if (currentFinalTranscript.includes(spokenCmd)) {
+                                     var actualCommand = commandMap[spokenCmd];
+                                     if (_this.onCommandRecognized) {
+                                         _this.onCommandRecognized(actualCommand);
+                                     }
+                                     break; // Process the first command found (and its alias)
+                                 }
+                             }
+                         } catch (err) {
+                             _didIteratorError = true;
+                             _iteratorError = err;
+                         } finally{
+                             try {
+                                 if (!_iteratorNormalCompletion && _iterator.return != null) {
+                                     _iterator.return();
+                                 }
+                             } finally{
+                                 if (_didIteratorError) {
+                                     throw _iteratorError;
+                                 }
+                             }
+                         }
+                         // Reset finalTranscript for the next full utterance if you are processing utterance by utterance
+                         // If you want to accumulate, then don't reset here.
+                         // For command processing, resetting per utterance is usually best.
+                         _this.finalTranscript = '';
+                     } else {
+                         _this.interimTranscript += event.results[i][0].transcript;
+                         if (_this.onTranscript) {
+                             _this.onTranscript(null, _this.interimTranscript);
+                         }
+                     }
+                 }
+                 // If only interim results were processed in this event batch, ensure onTranscript is called
+                 if (_this.interimTranscript && !event.results[event.results.length - 1].isFinal) {
+                     if (_this.onTranscript) {
+                         _this.onTranscript(null, _this.interimTranscript);
+                     }
+                 }
+             };
+             this.recognition.onerror = function(event) {
+                 console.error('Speech recognition error:', event.error);
+                 var oldIsRecognizing = _this.isRecognizing;
+                 _this.isRecognizing = false;
+                 _this.finalTranscript = ''; // Clear transcript on error
+                 _this.interimTranscript = '';
+                 if (_this.onTranscript) _this.onTranscript('', ''); // Clear display
+                 if (oldIsRecognizing && _this.onRecognitionActive) _this.onRecognitionActive(false);
+                 // Automatically restart if it's an 'aborted' or 'no-speech' error
+                 if (event.error === 'aborted' || event.error === 'no-speech') {
+                     console.log('Restarting speech recognition due to inactivity or abort.');
+                     // Don't call startRecognition directly, let onend handle it if continuous
+                 }
+             };
+             this.recognition.onend = function() {
+                 var oldIsRecognizing = _this.isRecognizing;
+                 _this.isRecognizing = false;
+                 console.log('Speech recognition ended.');
+                 _this.finalTranscript = ''; // Clear transcript on end
+                 _this.interimTranscript = '';
+                 if (_this.onTranscript) _this.onTranscript('', ''); // Clear display
+                 if (oldIsRecognizing && _this.onRecognitionActive) _this.onRecognitionActive(false);
+                 // If it ended and continuous is true, restart it.
+                 // This handles cases where the browser might stop it.
+                 if (_this.recognition.continuous) {
+                     console.log('Continuous mode: Restarting speech recognition.');
+                     _this.startRecognition(); // startRecognition already resets transcripts
+                 }
+             };
+         } else {
+             console.warn('Web Speech API is not supported in this browser.');
+         }
+     }
+     _create_class(SpeechManager, [
+         {
+             key: "startRecognition",
+             value: function startRecognition() {
+                 var _this = this;
+                 if (this.recognition && !this.isRecognizing) {
+                     try {
+                         this.finalTranscript = ''; // Reset transcript
+                         this.interimTranscript = '';
+                         this.recognition.start();
+                     } catch (e) {
+                         console.error("Error starting speech recognition:", e);
+                         // This can happen if it's already started or due to permissions
+                         if (e.name === 'InvalidStateError' && this.isRecognizing) {
+                             // Already started, do nothing
+                         } else {
+                             // Attempt to restart if it fails for other reasons (e.g. after an error)
+                             setTimeout(function() {
+                                 return _this.startRecognition();
+                             }, 500);
+                         }
+                     }
+                 }
+             }
+         },
+         {
+             key: "stopRecognition",
+             value: function stopRecognition() {
+                 if (this.recognition && this.isRecognizing) {
+                     this.recognition.stop();
+                 }
+             }
+         },
+         {
+             key: "requestPermissionAndStart",
+             value: // Call this on user interaction to request microphone permission
+             function requestPermissionAndStart() {
+                 var _this = this;
+                 return _async_to_generator(function() {
+                     var err;
+                     return _ts_generator(this, function(_state) {
+                         switch(_state.label){
+                             case 0:
+                                 if (!_this.recognition) {
+                                     console.log("Speech recognition not supported.");
+                                     return [
+                                         2
+                                     ];
+                                 }
+                                 _state.label = 1;
+                             case 1:
+                                 _state.trys.push([
+                                     1,
+                                     3,
+                                     ,
+                                     4
+                                 ]);
+                                 // Attempt to get microphone access (this might prompt the user)
+                                 return [
+                                     4,
+                                     navigator.mediaDevices.getUserMedia({
+                                         audio: true
+                                     })
+                                 ];
+                             case 2:
+                                 _state.sent();
+                                 console.log("Microphone permission granted.");
+                                 _this.startRecognition();
+                                 return [
+                                     3,
+                                     4
+                                 ];
+                             case 3:
+                                 err = _state.sent();
+                                 console.error("Microphone permission denied or error:", err);
+                                 if (_this.onTranscript) {
+                                     _this.onTranscript("Microphone access denied. Please allow microphone access in your browser settings.", "");
+                                 }
+                                 return [
+                                     3,
+                                     4
+                                 ];
+                             case 4:
+                                 return [
+                                     2
+                                 ];
+                         }
+                     });
+                 })();
+             }
+         }
+     ]);
+     return SpeechManager;
+ }();
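
For reference, a minimal sketch of how this class could be wired up. The callback and method names come from the constructor and `_create_class` block above; the actual wiring lives in game.js, which is not rendered in this diff, so the handler bodies here are hypothetical placeholders.

```js
import { SpeechManager } from './SpeechManager.js';

const speech = new SpeechManager(
    // onTranscript(finalText, interimText): update an on-screen transcript display
    (finalText, interimText) => console.log('Transcript:', finalText || interimText || '(cleared)'),
    // onRecognitionActive(isActive): toggle a "listening" indicator
    (isActive) => console.log('Recognition active:', isActive),
    // onCommandRecognized(command): receives 'drag' | 'rotate' | 'scale' | 'animate'
    (command) => console.log('Switch interaction mode to:', command)
);

// Must run from a user gesture so the browser can show the microphone prompt;
// requestPermissionAndStart() calls getUserMedia, then startRecognition().
document.addEventListener('click', () => speech.requestPermissionAndStart(), { once: true });
```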
assets/Stan.gltf ADDED
The diff for this file is too large to render.
 
assets/siteOGImage.jpg ADDED

Git LFS Details

  • SHA256: d4ce6d79769909b5e14d82ab06a1b09711533dd6183db3b669444b6a39af0748
  • Pointer size: 131 Bytes
  • Size of remote file: 444 kB
audioManager.js ADDED
@@ -0,0 +1,85 @@
+ // Basic Web Audio API Sound Manager
+ function _class_call_check(instance, Constructor) {
+     if (!(instance instanceof Constructor)) {
+         throw new TypeError("Cannot call a class as a function");
+     }
+ }
+ function _defineProperties(target, props) {
+     for(var i = 0; i < props.length; i++){
+         var descriptor = props[i];
+         descriptor.enumerable = descriptor.enumerable || false;
+         descriptor.configurable = true;
+         if ("value" in descriptor) descriptor.writable = true;
+         Object.defineProperty(target, descriptor.key, descriptor);
+     }
+ }
+ function _create_class(Constructor, protoProps, staticProps) {
+     if (protoProps) _defineProperties(Constructor.prototype, protoProps);
+     if (staticProps) _defineProperties(Constructor, staticProps);
+     return Constructor;
+ }
+ export var AudioManager = /*#__PURE__*/ function() {
+     "use strict";
+     function AudioManager() {
+         _class_call_check(this, AudioManager);
+         // Use '||' for broader browser compatibility, though 'webkit' is largely legacy
+         var AudioContext = window.AudioContext || window.webkitAudioContext;
+         this.audioCtx = null;
+         this.isInitialized = false;
+         this.lastClickTime = 0;
+         this.clickInterval = 200; // Milliseconds between clicks for rhythm
+         if (AudioContext) {
+             try {
+                 this.audioCtx = new AudioContext();
+                 this.isInitialized = true;
+                 console.log("AudioContext created successfully.");
+             } catch (e) {
+                 console.error("Error creating AudioContext:", e);
+             }
+         } else {
+             console.warn("Web Audio API is not supported in this browser.");
+         }
+     }
+     _create_class(AudioManager, [
+         {
+             // Resume audio context after user interaction (required by many browsers)
+             key: "resumeContext",
+             value: function resumeContext() {
+                 if (this.audioCtx && this.audioCtx.state === 'suspended') {
+                     this.audioCtx.resume().then(function() {
+                         console.log("AudioContext resumed successfully.");
+                     }).catch(function(e) {
+                         return console.error("Error resuming AudioContext:", e);
+                     });
+                 }
+             }
+         },
+         {
+             key: "playInteractionClickSound",
+             value: function playInteractionClickSound() {
+                 if (!this.isInitialized || !this.audioCtx || this.audioCtx.state !== 'running') return;
+                 var internalCurrentTime = this.audioCtx.currentTime;
+                 // Check if enough time has passed since the last click
+                 if (internalCurrentTime - this.lastClickTime < this.clickInterval / 1000) {
+                     return; // Too soon for the next click
+                 }
+                 this.lastClickTime = internalCurrentTime;
+                 var oscillator = this.audioCtx.createOscillator();
+                 var gainNode = this.audioCtx.createGain();
+                 oscillator.connect(gainNode);
+                 gainNode.connect(this.audioCtx.destination);
+                 oscillator.type = 'sine'; // Softer waveform for a 'tic'
+                 oscillator.frequency.setValueAtTime(1200, this.audioCtx.currentTime); // Lowered base pitch
+                 // A very quick pitch drop can make it sound more 'clicky'
+                 oscillator.frequency.exponentialRampToValueAtTime(600, this.audioCtx.currentTime + 0.01); // Lowered pitch drop target
+                 var clickVolume = 0.08; // Increased volume slightly
+                 gainNode.gain.setValueAtTime(0, this.audioCtx.currentTime); // Start silent for a clean attack
+                 gainNode.gain.linearRampToValueAtTime(clickVolume, this.audioCtx.currentTime + 0.003); // Very fast attack
+                 gainNode.gain.exponentialRampToValueAtTime(0.0001, this.audioCtx.currentTime + 0.005); // Keep decay short for 'tic'
+                 oscillator.start(this.audioCtx.currentTime);
+                 oscillator.stop(this.audioCtx.currentTime + 0.005); // Match decay duration
+             }
+         }
+     ]);
+     return AudioManager;
+ }();
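
Likewise, a small usage sketch assuming the API shown above; the pinch handler name is hypothetical, and the real integration in game.js is not rendered here.

```js
import { AudioManager } from './audioManager.js';

const audio = new AudioManager();

// Browsers often create the AudioContext in a 'suspended' state; resume it on the first user gesture.
document.addEventListener('pointerdown', () => audio.resumeContext(), { once: true });

// Hypothetical pinch-move handler: playInteractionClickSound() is rate-limited internally
// to one 'tic' per clickInterval (200 ms), so it is safe to call on every update.
function onPinchMove() {
    audio.playInteractionClickSound();
}
```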
game.js ADDED
The diff for this file is too large to render.
 
index.html CHANGED
@@ -1,19 +1,57 @@
- <!doctype html>
- <html>
- <head>
-     <meta charset="utf-8" />
-     <meta name="viewport" content="width=device-width" />
-     <title>My static Space</title>
-     <link rel="stylesheet" href="style.css" />
- </head>
- <body>
-     <div class="card">
-         <h1>Welcome to your static Space!</h1>
-         <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
-         <p>
-             Also don't forget to check the
-             <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
-         </p>
-     </div>
- </body>
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+     <title>3D Model Playground</title>
+     <link rel="stylesheet" href="styles.css">
+
+     <!-- Primary Meta Tags -->
+     <meta name="title" content="3D Model Playground">
+     <meta name="description" content="Control 3D models with hand gestures & voice commands">
+
+     <!-- Open Graph / Facebook -->
+     <meta property="og:type" content="website">
+     <meta property="og:url" content="https://collidingscopes.github.io/3d-model-playground/">
+     <meta property="og:title" content="3D Model Playground">
+     <meta property="og:description" content="Control 3D models with hand gestures & voice commands">
+     <meta property="og:image" content="https://raw.githubusercontent.com/collidingScopes/3d-model-playground/main/assets/siteOGImage.jpg">
+
+     <!-- Twitter -->
+     <meta property="twitter:card" content="summary_large_image">
+     <meta property="twitter:url" content="https://collidingscopes.github.io/3d-model-playground/">
+     <meta property="twitter:title" content="3D Model Playground">
+     <meta property="twitter:description" content="Control 3D models with hand gestures & voice commands">
+     <meta property="twitter:image" content="https://raw.githubusercontent.com/collidingScopes/3d-model-playground/main/assets/siteOGImage.jpg">
+
+     <script defer src="https://cloud.umami.is/script.js" data-website-id="eb59c81c-27cb-4e1d-9e8c-bfbe70c48cd9"></script>
+     <script type="importmap">
+     {
+         "imports": {
+             "three": "https://unpkg.com/[email protected]/build/three.module.js",
+             "three/examples/": "https://unpkg.com/[email protected]/examples/jsm/",
+             "three/loaders/": "https://unpkg.com/[email protected]/examples/jsm/loaders/"
+         }
+     }
+     </script>
+ </head>
+ <body style="width: 100%; height: 100%; overflow: hidden; margin: 0;">
+     <div id="renderDiv" style="width: 100%; height: 100%; margin: 0;">
+         <div id="instruction-text" class="text-box"></div>
+         <div id="video-link" class="text-box">
+             <a href="https://youtu.be/_I1E44Fp1Es?si=lR2otqR_-ZGdIGXT" target="_blank">Video Demo</a>
+         </div>
+         <div id="social-links" class="text-box">
+             <a href="https://www.x.com/measure_plan/" target="_blank">Twitter</a><br>
+             <a href="https://www.instagram.com/stereo.drift/" target="_blank">Instagram</a><br>
+             <a href="https://www.youtube.com/@funwithcomputervision" target="_blank">Youtube</a>
+         </div>
+         <div id="coffee-link" class="text-box">
+             <span id="logo">🪬</span><br>
+             <a href="https://www.funwithcomputervision.com/" target="_blank">code & tutorials here</a>
+         </div>
+     </div>
+     <script type="module" src="main.js"></script>
+
+ </body>
  </html>
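
The import map above lets module scripts refer to Three.js by bare specifier instead of a full unpkg URL. A sketch of what that resolution allows (illustrative only; game.js is not rendered in this diff, and the exact specifiers it uses may differ):

```js
// "three" resolves to build/three.module.js, and the "three/loaders/" prefix
// resolves to examples/jsm/loaders/, per the import map in index.html.
import * as THREE from 'three';
import { GLTFLoader } from 'three/loaders/GLTFLoader.js';

const scene = new THREE.Scene();
new GLTFLoader().load('assets/Stan.gltf', (gltf) => scene.add(gltf.scene));
```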
main.js ADDED
@@ -0,0 +1,12 @@
+ import { Game } from './game.js';
+ // Get the render target div
+ var renderDiv = document.getElementById('renderDiv');
+ // Check if renderDiv exists
+ if (!renderDiv) {
+     console.error('Fatal Error: renderDiv element not found.');
+ } else {
+     // Initialize the game with the render target
+     var game = new Game(renderDiv);
+     // Start the game
+     game.start(); // The actual setup happens async within the Game class constructor
+ }
styles.css ADDED
@@ -0,0 +1,44 @@
+ .text-box {
+     padding: 8px 15px;
+     background-color: rgba(255, 255, 255, 0.9);
+     color: black;
+     border-radius: 4px;
+     font-family: "Arial", "Helvetica Neue", Helvetica, sans-serif;
+     border: 2px solid black;
+     box-shadow: 3px 3px 0px black;
+     font-size: clamp(13px, 2vw, 15px);
+     text-align: center;
+     z-index: 200;
+     opacity: 1;
+     transition: opacity 0.3s ease-in-out, bottom 0.3s ease-in-out, box-shadow 0.2s ease;
+ }
+
+ #instruction-text {
+     position: absolute;
+     bottom: 10px;
+     left: 50%;
+     transform: translateX(-50%);
+     pointer-events: none;
+ }
+
+ #social-links {
+     position: absolute;
+     bottom: 10px;
+     left: 10px;
+ }
+
+ #coffee-link {
+     position: absolute;
+     bottom: 10px;
+     right: 10px;
+ }
+
+ #video-link {
+     position: absolute;
+     top: 10px;
+     left: 10px;
+ }
+
+ #logo {
+     font-size: 2em;
+ }