jbilcke-hf HF Staff committed on
Commit
d7edecf
1 Parent(s): 2ecfc57

working on the chat system

PROMPT_CONTEXT.md CHANGED
@@ -1,12 +1,14 @@
1
- For some context:
2
 
3
- our app is an AI video generation platform, where people use the frontend app (written in Flutter) to virtually "search" for videos (the video synopses and their content are generated on the fly using AI).
4
 
5
- Note that this uses a custom API written in Python, with WebSocket communication.
6
 
7
- Back in the Flutter app, when the user opens a thumbnail card after such a generative AI search, it opens a full view for the video (with a player, title, description, chat section, etc., and a special search bar in the top header that allows editing the current video's theme).
8
 
 
9
 
10
- Task to perform: <fill your demand here>
 
11
 
12
  Note: For the task to be validated, running the shell command "flutter build web" must succeed.
 
1
+ GENERAL CONTEXT:
2
 
3
+ This project is an app where users can generate videos using AI. What is interesting is that not only the search results are generated (there is no actual search in a DB; instead an LLM hallucinates search result items, simulating a video platform à la YouTube), but also the video streams themselves (a video is composed of an infinite stream of MP4 clips a few seconds long, also generated using AI with a fast generative model that works in near real-time, e.g. it takes 4s to generate 2s of footage).
4
 
5
+ The architecture is simple: a Flutter frontend UI with two main views (home_screen.dart for search, video_screen.dart for the infinite video stream player). The frontend UI talks to a Python API (see api.py) using WebSockets, as we have various real-time communication needs (chat, streaming of MP4 chunks, etc.). This Python API is responsible for performing the actual calls to the generative video model and the LLM (those are external servers hosted on Hugging Face, but explaining how they work is outside the scope of this documentation).
6
 
7
+ There is an integrated simulator, which evolves a description (video prompt) over time using an LLM.
8
 
9
+ Users can be anonymous, but if they connect using a Hugging Face API key, they get some extra perks.
10
 
11
+ TASK:
12
+ <fill your demand here>
13
 
14
  Note: For the task to be validated, running the shell command "flutter build web" must succeed.
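For reference, the Flutter ↔ Python exchange described in the context above boils down to JSON messages over a single WebSocket. A minimal sketch of such a round trip, assuming the `websockets` client library and a ws://localhost:8080/ws endpoint (both assumptions for illustration, not part of this commit):

# sketch_client.py -- hypothetical round trip against the Python API
import asyncio
import json
import websockets  # assumed client library, not used by the actual Flutter app

async def demo_search(query: str) -> None:
    # The endpoint URL is an assumption; see api.py for the real handler.
    async with websockets.connect("ws://localhost:8080/ws") as ws:
        await ws.send(json.dumps({
            "action": "search",   # routed to the per-user search queue server-side
            "requestId": "demo-1",
            "query": query,       # hypothetical field name, for illustration only
        }))
        print(json.loads(await ws.recv()))

asyncio.run(demo_search("a documentary about deep sea creatures"))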
api.py CHANGED
@@ -155,6 +155,8 @@ async def websocket_handler(request: web.Request) -> web.WebSocketResponse:
155
  request_type = 'video'
156
  elif action == 'search':
157
  request_type = 'search'
 
 
158
 
159
  # Record the request for metrics
160
  await metrics_tracker.record_request(user_id, client_ip, request_type, user_role)
@@ -176,6 +178,8 @@ async def websocket_handler(request: web.Request) -> web.WebSocketResponse:
176
  await user_session.video_queue.put(data)
177
  elif action == 'search':
178
  await user_session.search_queue.put(data)
 
 
179
  else:
180
  await user_session.process_generic_request(data)
181
 
 
155
  request_type = 'video'
156
  elif action == 'search':
157
  request_type = 'search'
158
+ elif action == 'simulate':
159
+ request_type = 'simulation'
160
 
161
  # Record the request for metrics
162
  await metrics_tracker.record_request(user_id, client_ip, request_type, user_role)
 
178
  await user_session.video_queue.put(data)
179
  elif action == 'search':
180
  await user_session.search_queue.put(data)
181
+ elif action == 'simulate':
182
+ await user_session.simulation_queue.put(data)
183
  else:
184
  await user_session.process_generic_request(data)
185
 
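The new 'simulate' action added above is just another routed message. A sketch of the payload it carries, with field names taken from the queue processor added in api_session.py below (all values are illustrative):

# Illustrative 'simulate' request as dispatched by websocket_handler
simulate_request = {
    "action": "simulate",            # recorded as request_type 'simulation' for metrics
    "requestId": "req-42",
    "video_id": "abc-123",
    "original_title": "Deep Sea Documentary",
    "original_description": "A narrated dive along a hydrothermal vent.",
    "current_description": "",       # empty on the first evolution
    "condensed_history": "",
    "evolution_count": 0,
    "chat_messages": "alice: show a giant squid!",
}
# Server side: await user_session.simulation_queue.put(simulate_request)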
api_core.py CHANGED
@@ -451,6 +451,126 @@ A video can be anything from a tutorial, webcam, trailer, movie, live stream etc
451
  except Exception as e:
452
  logger.error(f"Error generating caption: {str(e)}")
453
  return ""
 
454
 
455
 
456
  def get_config_value(self, role: UserRole, field: str, options: dict = None) -> Any:
@@ -696,9 +816,9 @@ Your caption:"""
696
  elif orientation == 'LANDSCAPE' and height > width:
697
  # Swap height and width for landscape orientation
698
  height, width = width, height
699
- #logger.info(f"Orientation: {orientation}, swapped dimensions to width={width}, height={height}, steps={num_inference_steps}, fps={frame_rate} | role: {user_role}")
700
  else:
701
- #logger.info(f"Orientation: {orientation}, using original dimensions width={width}, height={height}, steps={num_inference_steps}, fps={frame_rate} | role: {user_role}")
702
  pass
703
 
704
  # Generate the video with standard settings
 
451
  except Exception as e:
452
  logger.error(f"Error generating caption: {str(e)}")
453
  return ""
454
+
455
+ async def simulate(self, original_title: str, original_description: str,
456
+ current_description: str, condensed_history: str,
457
+ evolution_count: int = 0, chat_messages: str = '') -> dict:
458
+ """
459
+ Simulate a video by evolving its description to create a dynamic narrative.
460
+
461
+ Args:
462
+ original_title: The original video title
463
+ original_description: The original video description
464
+ current_description: The current description (last evolved or original if first evolution)
465
+ condensed_history: A condensed summary of previous scene developments
466
+ evolution_count: How many times the simulation has already evolved
467
+ chat_messages: Chat messages from users to incorporate into the simulation
468
+
469
+ Returns:
470
+ A dictionary containing the evolved description and updated condensed history
471
+ """
472
+ try:
473
+ # Determine if this is the first simulation
474
+ is_first_simulation = evolution_count == 0 or not condensed_history
475
+
476
+ logger.info(f"simulate(): is_first_simulation={is_first_simulation}")
477
+
478
+ # Create an appropriate prompt based on whether this is the first simulation
479
+ chat_section = ""
480
+ if chat_messages:
481
+ chat_section = f"""
482
+ People are watching this content right now and have shared their thoughts. Like a game master, please take their feedback as input to adjust the story and/or the scene. Here are their messages:
483
+
484
+ {chat_messages}
485
+ """
486
+
487
+ if is_first_simulation:
488
+ prompt = f"""You are tasked with evolving the narrative for a video titled: "{original_title}"
489
+
490
+ Original description:
491
+ {original_description}
492
+ {chat_section}
493
+
494
+ Instructions:
495
+ 1. Imagine the next logical scene or development that would follow this description.
496
+ 2. Create a compelling new description (200-300 words) that builds on the original but introduces new elements, developments, or perspectives.
497
+ 3. Maintain the original style, tone, and setting.
498
+ 4. If viewers have shared messages, consider their input and incorporate relevant suggestions or reactions into your narrative evolution.
499
+ 5. Also create a brief "scene history" (50-75 words) that summarizes what has happened so far.
500
+
501
+ Return your response in this format:
502
+ EVOLVED_DESCRIPTION: [your new evolved description here]
503
+ CONDENSED_HISTORY: [your scene history summary]"""
504
+ else:
505
+ prompt = f"""You are tasked with continuing to evolve the narrative for a video titled: "{original_title}"
506
+
507
+ Original description:
508
+ {original_description}
509
+
510
+ Condensed history of scenes so far:
511
+ {condensed_history}
512
+
513
+ Current description (most recent scene):
514
+ {current_description}
515
+ {chat_section}
516
+
517
+ Instructions:
518
+ 1. Imagine the next logical scene or development that would follow the current description.
519
+ 2. Create a compelling new description (200-300 words) that builds on the narrative but introduces new elements, developments, or perspectives.
520
+ 3. Maintain consistency with the previous scenes while advancing the story.
521
+ 4. If viewers have shared messages, consider their input and incorporate relevant suggestions or reactions into your narrative evolution.
522
+ 5. Also update the condensed history (50-75 words) to include this new development.
523
+
524
+ Return your response in this format:
525
+ EVOLVED_DESCRIPTION: [your new evolved description here]
526
+ CONDENSED_HISTORY: [your updated scene history summary]"""
527
+
528
+ # Generate the evolved description
529
+ response = await asyncio.get_event_loop().run_in_executor(
530
+ None,
531
+ lambda: self.inference_client.text_generation(
532
+ prompt,
533
+ model=TEXT_MODEL,
534
+ max_new_tokens=200,
535
+ temperature=0.7
536
+ )
537
+ )
538
+
539
+ # Extract the evolved description and condensed history from the response
540
+ evolved_description = ""
541
+ new_condensed_history = ""
542
+
543
+ # Parse the response
544
+ if "EVOLVED_DESCRIPTION:" in response and "CONDENSED_HISTORY:" in response:
545
+ parts = response.split("CONDENSED_HISTORY:")
546
+ if len(parts) >= 2:
547
+ desc_part = parts[0].strip()
548
+ if "EVOLVED_DESCRIPTION:" in desc_part:
549
+ evolved_description = desc_part.split("EVOLVED_DESCRIPTION:", 1)[1].strip()
550
+ new_condensed_history = parts[1].strip()
551
+
552
+ # If parsing failed, use some fallbacks
553
+ if not evolved_description:
554
+ evolved_description = current_description
555
+ logger.warning(f"Failed to parse evolved description, using current description as fallback")
556
+
557
+ if not new_condensed_history and condensed_history:
558
+ new_condensed_history = condensed_history
559
+ logger.warning(f"Failed to parse condensed history, using current history as fallback")
560
+ elif not new_condensed_history:
561
+ new_condensed_history = f"The video begins with {original_title}: {original_description[:100]}..."
562
+
563
+ return {
564
+ "evolved_description": evolved_description,
565
+ "condensed_history": new_condensed_history
566
+ }
567
+
568
+ except Exception as e:
569
+ logger.error(f"Error simulating video: {str(e)}")
570
+ return {
571
+ "evolved_description": current_description,
572
+ "condensed_history": condensed_history or f"The video shows {original_title}."
573
+ }
574
 
575
 
576
  def get_config_value(self, role: UserRole, field: str, options: dict = None) -> Any:
 
816
  elif orientation == 'LANDSCAPE' and height > width:
817
  # Swap height and width for landscape orientation
818
  height, width = width, height
819
+ logger.info(f"generate_video() Orientation: {orientation}, swapped dimensions to width={width}, height={height}, steps={num_inference_steps}, fps={frame_rate} | role: {user_role}")
820
  else:
821
+ logger.info(f"generate_video() Orientation: {orientation}, using original dimensions width={width}, height={height}, steps={num_inference_steps}, fps={frame_rate} | role: {user_role}")
822
  pass
823
 
824
  # Generate the video with standard settings
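The simulate() method above relies on a plain-text contract with the LLM. A standalone sketch of the parsing step, using a made-up model response to show the expected format:

# Sketch of the EVOLVED_DESCRIPTION / CONDENSED_HISTORY parsing contract
response = (
    "EVOLVED_DESCRIPTION: The camera drifts past the vent as a pale squid emerges.\n"
    "CONDENSED_HISTORY: A dive along a hydrothermal vent; a squid has appeared."
)

evolved_description, condensed_history = "", ""
if "EVOLVED_DESCRIPTION:" in response and "CONDENSED_HISTORY:" in response:
    head, tail = response.split("CONDENSED_HISTORY:", 1)
    evolved_description = head.split("EVOLVED_DESCRIPTION:", 1)[1].strip()
    condensed_history = tail.strip()

assert evolved_description.startswith("The camera drifts")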
api_session.py CHANGED
@@ -24,19 +24,22 @@ class UserSession:
24
  self.chat_queue = asyncio.Queue()
25
  self.video_queue = asyncio.Queue()
26
  self.search_queue = asyncio.Queue()
 
27
 
28
  # Track request counts and rate limits
29
  self.request_counts = {
30
  'chat': 0,
31
  'video': 0,
32
- 'search': 0
 
33
  }
34
 
35
  # Last request timestamps for rate limiting
36
  self.last_request_times = {
37
  'chat': time.time(),
38
  'video': time.time(),
39
- 'search': time.time()
 
40
  }
41
 
42
  # Session creation time
@@ -50,7 +53,8 @@ class UserSession:
50
  self.background_tasks = [
51
  asyncio.create_task(self._process_chat_queue()),
52
  asyncio.create_task(self._process_video_queue()),
53
- asyncio.create_task(self._process_search_queue())
 
54
  ]
55
  logger.info(f"Started session for user {self.user_id} with role {self.user_role}")
56
 
@@ -78,9 +82,13 @@ class UserSession:
78
  result = await self.shared_api.handle_chat_message(data, self.ws)
79
  elif data['action'] == 'leave_chat':
80
  result = await self.shared_api.handle_leave_chat(data, self.ws)
81
- # Handle thumbnail requests as chat requests for immediate processing
82
  elif data['action'] == 'generate_video_thumbnail':
83
- result = await self._handle_thumbnail_request(data)
 
 
 
 
84
  else:
85
  raise ValueError(f"Unknown chat action: {data['action']}")
86
 
@@ -262,6 +270,82 @@ class UserSession:
262
  if 'search_queue' in self.__dict__:
263
  self.search_queue.task_done()
264
 
265
  async def process_generic_request(self, data: dict) -> None:
266
  """Handle general requests that don't fit into specialized queues"""
267
  try:
@@ -310,7 +394,9 @@ class UserSession:
310
  'caption': caption
311
  })
312
 
313
- elif action == 'generate_thumbnail' or action == 'generate_video_thumbnail':
 
 
314
  title = data.get('title', '') or data.get('params', {}).get('title', '')
315
  description = data.get('description', '') or data.get('params', {}).get('description', '')
316
  video_prompt_prefix = data.get('video_prompt_prefix', '') or data.get('params', {}).get('video_prompt_prefix', '')
@@ -345,61 +431,56 @@ class UserSession:
345
  title, description, video_prompt_prefix, options, self.user_role
346
  )
347
 
348
- await self.ws.send_json({
349
- 'action': action,
350
- 'requestId': request_id,
351
- 'success': True,
352
- 'thumbnail': thumbnail_data,
353
- })
354
- except Exception as e:
355
- logger.error(f"Error generating thumbnail: {str(e)}")
356
- await self.ws.send_json(error_response(f"Thumbnail generation failed: {str(e)}"))
357
-
358
- elif action == 'old_generate_thumbnail' or action == 'generate_thumbnail':
359
- # Redirect to video thumbnail generation instead of static image
360
- title = data.get('params', {}).get('title')
361
- description = data.get('params', {}).get('description')
362
-
363
- if not title or not description:
364
- await self.ws.send_json(error_response('Missing title or description'))
365
- return
366
-
367
- # Use the video thumbnail function instead
368
- options = {
369
- 'width': 512,
370
- 'height': 288,
371
- 'thumbnail': True,
372
- 'video_id': f"thumbnail-{request_id}"
373
- }
374
-
375
- try:
376
- thumbnail = await self.shared_api.generate_video_thumbnail(
377
- title, description, "", options, self.user_role
378
- )
379
-
380
- # Check thumbnail is not empty
381
- if thumbnail is None or thumbnail == "":
382
  await self.ws.send_json({
383
  'action': action,
384
  'requestId': request_id,
385
  'success': True,
386
- 'thumbnailUrl': ""
387
  })
388
  else:
 
389
  await self.ws.send_json({
390
  'action': action,
391
  'requestId': request_id,
392
  'success': True,
393
- 'thumbnailUrl': thumbnail
394
  })
395
  except Exception as e:
396
- logger.error(f"Error generating video thumbnail: {str(e)}")
397
- await self.ws.send_json({
398
- 'action': action,
399
- 'requestId': request_id,
400
- 'success': True, # Still return success to avoid client errors
401
- 'thumbnailUrl': "" # But with empty thumbnail
402
- })
 
403
 
404
  else:
405
  await self.ws.send_json(error_response(f'Unknown action: {action}'))
@@ -473,7 +554,8 @@ class SessionManager:
473
  'requests': {
474
  'chat': 0,
475
  'video': 0,
476
- 'search': 0
 
477
  }
478
  }
479
 
@@ -482,5 +564,6 @@ class SessionManager:
482
  stats['requests']['chat'] += session.request_counts['chat']
483
  stats['requests']['video'] += session.request_counts['video']
484
  stats['requests']['search'] += session.request_counts['search']
 
485
 
486
  return stats
 
24
  self.chat_queue = asyncio.Queue()
25
  self.video_queue = asyncio.Queue()
26
  self.search_queue = asyncio.Queue()
27
+ self.simulation_queue = asyncio.Queue() # New queue for description evolution
28
 
29
  # Track request counts and rate limits
30
  self.request_counts = {
31
  'chat': 0,
32
  'video': 0,
33
+ 'search': 0,
34
+ 'simulation': 0 # New counter for simulation requests
35
  }
36
 
37
  # Last request timestamps for rate limiting
38
  self.last_request_times = {
39
  'chat': time.time(),
40
  'video': time.time(),
41
+ 'search': time.time(),
42
+ 'simulation': time.time() # New timestamp for simulation requests
43
  }
44
 
45
  # Session creation time
 
53
  self.background_tasks = [
54
  asyncio.create_task(self._process_chat_queue()),
55
  asyncio.create_task(self._process_video_queue()),
56
+ asyncio.create_task(self._process_search_queue()),
57
+ asyncio.create_task(self._process_simulation_queue()) # New worker for simulation requests
58
  ]
59
  logger.info(f"Started session for user {self.user_id} with role {self.user_role}")
60
 
 
82
  result = await self.shared_api.handle_chat_message(data, self.ws)
83
  elif data['action'] == 'leave_chat':
84
  result = await self.shared_api.handle_leave_chat(data, self.ws)
85
+ # Redirect thumbnail requests to process_generic_request for consistent handling
86
  elif data['action'] == 'generate_video_thumbnail':
87
+ # Pass to the generic request handler to maintain consistent logic
88
+ await self.process_generic_request(data)
89
+ # Skip normal response handling since process_generic_request already sends a response
90
+ self.chat_queue.task_done()
91
+ continue
92
  else:
93
  raise ValueError(f"Unknown chat action: {data['action']}")
94
 
 
270
  if 'search_queue' in self.__dict__:
271
  self.search_queue.task_done()
272
 
273
+ async def _process_simulation_queue(self):
274
+ """Dedicated queue for video simulation requests"""
275
+ while True:
276
+ try:
277
+ data = await self.simulation_queue.get()
278
+ request_id = data.get('requestId')
279
+
280
+ # Extract parameters from the request
281
+ video_id = data.get('video_id', '')
282
+ original_title = data.get('original_title', '')
283
+ original_description = data.get('original_description', '')
284
+ current_description = data.get('current_description', '')
285
+ condensed_history = data.get('condensed_history', '')
286
+ evolution_count = data.get('evolution_count', 0)
287
+ chat_messages = data.get('chat_messages', '')
288
+
289
+ logger.info(f"Processing video simulation for user {self.user_id}, video_id={video_id}, evolution_count={evolution_count}")
290
+
291
+ # Validate required parameters
292
+ if not original_title or not original_description or not current_description:
293
+ result = {
294
+ 'action': 'simulate',
295
+ 'requestId': request_id,
296
+ 'success': False,
297
+ 'error': 'Missing required parameters'
298
+ }
299
+ else:
300
+ try:
301
+ # Call the simulate method in the API
302
+ simulation_result = await self.shared_api.simulate(
303
+ original_title=original_title,
304
+ original_description=original_description,
305
+ current_description=current_description,
306
+ condensed_history=condensed_history,
307
+ evolution_count=evolution_count,
308
+ chat_messages=chat_messages
309
+ )
310
+
311
+ result = {
312
+ 'action': 'simulate',
313
+ 'requestId': request_id,
314
+ 'success': True,
315
+ 'evolved_description': simulation_result['evolved_description'],
316
+ 'condensed_history': simulation_result['condensed_history']
317
+ }
318
+ except Exception as e:
319
+ logger.error(f"Error simulating video for user {self.user_id}, video_id={video_id}: {str(e)}")
320
+ result = {
321
+ 'action': 'simulate',
322
+ 'requestId': request_id,
323
+ 'success': False,
324
+ 'error': f'Simulation error: {str(e)}'
325
+ }
326
+
327
+ await self.ws.send_json(result)
328
+
329
+ # Update metrics
330
+ self.request_counts['simulation'] += 1
331
+ self.last_request_times['simulation'] = time.time()
332
+
333
+ except Exception as e:
334
+ logger.error(f"Error in simulation queue processor for user {self.user_id}: {str(e)}")
335
+ try:
336
+ error_payload = {  # distinct name, avoids shadowing the error_response() helper
337
+ 'action': 'simulate',
338
+ 'requestId': data.get('requestId') if 'data' in locals() else None,
339
+ 'success': False,
340
+ 'error': f'Internal server error: {str(e)}'
341
+ }
342
+ await self.ws.send_json(error_payload)
343
+ except Exception as send_error:
344
+ logger.error(f"Error sending error response: {send_error}")
345
+ finally:
346
+ if 'simulation_queue' in self.__dict__:
347
+ self.simulation_queue.task_done()
348
+
349
  async def process_generic_request(self, data: dict) -> None:
350
  """Handle general requests that don't fit into specialized queues"""
351
  try:
 
394
  'caption': caption
395
  })
396
 
397
+ # evolve_description is now handled by the dedicated simulation queue processor
398
+
399
+ elif action == 'generate_video_thumbnail':
400
  title = data.get('title', '') or data.get('params', {}).get('title', '')
401
  description = data.get('description', '') or data.get('params', {}).get('description', '')
402
  video_prompt_prefix = data.get('video_prompt_prefix', '') or data.get('params', {}).get('video_prompt_prefix', '')
 
431
  title, description, video_prompt_prefix, options, self.user_role
432
  )
433
 
434
+ # Respond with appropriate format based on the parameter names used in the request
435
+ if 'thumbnailUrl' in data or 'thumbnailUrl' in data.get('params', {}):
436
+ # Legacy format using thumbnailUrl
 
437
  await self.ws.send_json({
438
  'action': action,
439
  'requestId': request_id,
440
  'success': True,
441
+ 'thumbnailUrl': thumbnail_data or "",
442
  })
443
  else:
444
+ # New format using thumbnail
445
  await self.ws.send_json({
446
  'action': action,
447
  'requestId': request_id,
448
  'success': True,
449
+ 'thumbnail': thumbnail_data,
450
  })
451
  except Exception as e:
452
+ logger.error(f"Error generating thumbnail: {str(e)}")
453
+ await self.ws.send_json(error_response(f"Thumbnail generation failed: {str(e)}"))
454
+
455
+ # Handle deprecated thumbnail actions
456
+ elif action == 'generate_thumbnail' or action == 'old_generate_thumbnail':
457
+ # Redirect to video thumbnail generation
458
+ logger.warning(f"Deprecated thumbnail action '{action}' used, redirecting to generate_video_thumbnail")
459
+
460
+ # Extract parameters
461
+ title = data.get('title', '') or data.get('params', {}).get('title', '')
462
+ description = data.get('description', '') or data.get('params', {}).get('description', '')
463
+
464
+ if not title or not description:
465
+ await self.ws.send_json(error_response('Missing title or description'))
466
+ return
467
+
468
+ # Create a new request with the correct action
469
+ new_request = {
470
+ 'action': 'generate_video_thumbnail',
471
+ 'requestId': request_id,
472
+ 'title': title,
473
+ 'description': description,
474
+ 'options': {
475
+ 'width': 512,
476
+ 'height': 288,
477
+ 'thumbnail': True,
478
+ 'video_id': f"thumbnail-{request_id}"
479
+ }
480
+ }
481
+
482
+ # Process with the new action
483
+ await self.process_generic_request(new_request)
484
 
485
  else:
486
  await self.ws.send_json(error_response(f'Unknown action: {action}'))
 
554
  'requests': {
555
  'chat': 0,
556
  'video': 0,
557
+ 'search': 0,
558
+ 'simulation': 0
559
  }
560
  }
561
 
 
564
  stats['requests']['chat'] += session.request_counts['chat']
565
  stats['requests']['video'] += session.request_counts['video']
566
  stats['requests']['search'] += session.request_counts['search']
567
+ stats['requests']['simulation'] += session.request_counts['simulation']
568
 
569
  return stats
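On success, _process_simulation_queue answers with evolved_description and condensed_history; on failure it sets success to False. A sketch of how a client could fold that response into its video state (the helper itself is hypothetical; the field names come from the diff above):

# Sketch: merging a 'simulate' response into a client-side video record
def apply_simulate_response(result: dict, video: dict) -> dict:
    if result.get("action") != "simulate" or not result.get("success"):
        return video  # keep the previous description on failure
    return {
        **video,
        "evolvedDescription": result["evolved_description"],
        "condensedHistory": result["condensed_history"],
    }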
assets/config/aitube.yaml CHANGED
@@ -1,6 +1,6 @@
1
  ui:
2
  product_name: "#aitube2"
3
- showChatInVideoView: true
4
 
5
  render_queue:
6
  # how many clips should be stored in advance
@@ -12,6 +12,11 @@ render_queue:
12
  # start playback as soon as we have 1 video over 3 (25%)
13
  minimum_buffer_percent_to_start_playback: 5
14
 
 
 
 
 
 
15
  # it's OK to use high values here,
16
  # because some of those values are limited by the backend config,
17
  # such as the resolution or number of frames
 
1
  ui:
2
  product_name: "#aitube2"
3
+ showChatInVideoView: false
4
 
5
  render_queue:
6
  # how many clips should be stored in advance
 
12
  # start playback as soon as we have 1 video over 3 (25%)
13
  minimum_buffer_percent_to_start_playback: 5
14
 
15
+ simulation:
16
+ # how often the description should evolve (in seconds)
17
+ # setting to 0 disables description evolution
18
+ sim_loop_frequency_in_sec: 10
19
+
20
  # it's OK to use high values here,
21
  # because some of those values are limited by the backend config,
22
  # such as the resolution or number of frames
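The new simulation block uses 0 as a sentinel to disable description evolution (the Flutter side reads it via simLoopFrequencyInSec in lib/config/config.dart). A quick sketch of the equivalent read in Python, assuming PyYAML:

# Sketch: reading the simulation settings with the same 0-disables semantics
import yaml  # PyYAML, assumed available

with open("assets/config/aitube.yaml") as f:
    config = yaml.safe_load(f)

frequency = (config.get("simulation") or {}).get("sim_loop_frequency_in_sec", 0)
if frequency <= 0:
    print("description evolution disabled")
else:
    print(f"evolving description every {frequency}s")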
assets/config/default.yaml CHANGED
@@ -12,6 +12,11 @@ ui:
12
  # start playback as soon as we have 1 video over 4 (25%)
13
  minimum_buffer_percent_to_start_playback: 25
14
 
 
 
 
 
 
15
  video:
16
  # default negative prompt to filter harmful content
17
  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"
 
12
  # start playback as soon as we have 1 video over 4 (25%)
13
  minimum_buffer_percent_to_start_playback: 25
14
 
15
+ simulation:
16
+ # how often the description should evolve (in seconds)
17
+ # setting to 0 disables description evolution
18
+ sim_loop_frequency_in_sec: 8
19
+
20
  video:
21
  # default negative prompt to filter harmful content
22
  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"
build/web/.last_build_id CHANGED
@@ -1 +1 @@
1
- b2c2b70851545cb6ac33ffe1f33d36c9
 
1
+ 00a0a70792b04bcd7ca08cb0a04b422c
build/web/assets/assets/config/aitube.yaml CHANGED
@@ -1,6 +1,6 @@
1
  ui:
2
  product_name: "#aitube2"
3
- showChatInVideoView: true
4
 
5
  render_queue:
6
  # how many clips should be stored in advance
@@ -12,6 +12,11 @@ render_queue:
12
  # start playback as soon as we have 1 video over 3 (25%)
13
  minimum_buffer_percent_to_start_playback: 5
14
 
 
 
 
 
 
15
  # it's OK to use high values here,
16
  # because some of those values are limited by the backend config,
17
  # such as the resolution or number of frames
 
1
  ui:
2
  product_name: "#aitube2"
3
+ showChatInVideoView: false
4
 
5
  render_queue:
6
  # how many clips should be stored in advance
 
12
  # start playback as soon as we have 1 video over 3 (25%)
13
  minimum_buffer_percent_to_start_playback: 5
14
 
15
+ simulation:
16
+ # how often the description should evolve (in seconds)
17
+ # setting to 0 disables description evolution
18
+ sim_loop_frequency_in_sec: 10
19
+
20
  # it's OK to use high values here,
21
  # because some of those values are limited by the backend config,
22
  # such as the resolution or number of frames
build/web/assets/assets/config/default.yaml CHANGED
@@ -12,6 +12,11 @@ ui:
12
  # start playback as soon as we have 1 video over 4 (25%)
13
  minimum_buffer_percent_to_start_playback: 25
14
 
 
 
 
 
 
15
  video:
16
  # default negative prompt to filter harmful content
17
  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"
 
12
  # start playback as soon as we have 1 video over 4 (25%)
13
  minimum_buffer_percent_to_start_playback: 25
14
 
15
+ simulation:
16
+ # how often the description should evolve (in seconds)
17
+ # setting to 0 disables description evolution
18
+ sim_loop_frequency_in_sec: 8
19
+
20
  video:
21
  # default negative prompt to filter harmful content
22
  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"
build/web/assets/fonts/MaterialIcons-Regular.otf CHANGED
Binary files a/build/web/assets/fonts/MaterialIcons-Regular.otf and b/build/web/assets/fonts/MaterialIcons-Regular.otf differ
 
build/web/flutter_bootstrap.js CHANGED
@@ -39,6 +39,6 @@ _flutter.buildConfig = {"engineRevision":"382be0028d370607f76215a9be322e5514b263
39
 
40
  _flutter.loader.load({
41
  serviceWorkerSettings: {
42
- serviceWorkerVersion: "2592320512"
43
  }
44
  });
 
39
 
40
  _flutter.loader.load({
41
  serviceWorkerSettings: {
42
+ serviceWorkerVersion: "2056062353"
43
  }
44
  });
build/web/flutter_service_worker.js CHANGED
@@ -3,11 +3,11 @@ const MANIFEST = 'flutter-app-manifest';
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
- const RESOURCES = {"flutter_bootstrap.js": "66845f0dd2b5279057cda185e2d06e57",
7
  "version.json": "b5eaae4fc120710a3c35125322173615",
8
  "index.html": "2677c99782b3eb75aae7c757748ccacb",
9
  "/": "2677c99782b3eb75aae7c757748ccacb",
10
- "main.dart.js": "1abc87c3f21933e9d8ac7095bfd2f625",
11
  "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
12
  "aitube.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
@@ -23,11 +23,11 @@ const RESOURCES = {"flutter_bootstrap.js": "66845f0dd2b5279057cda185e2d06e57",
23
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
24
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
25
  "assets/AssetManifest.bin": "6c597105edcadb9c676bdc998c88545a",
26
- "assets/fonts/MaterialIcons-Regular.otf": "a9126745a3792756bbb88c84ed40e354",
27
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
28
  "assets/assets/config/custom.yaml": "e5c0b238b6f217f1215fbc813f093656",
29
- "assets/assets/config/aitube.yaml": "c9fa3d1bd8228652881a5409d78a5252",
30
- "assets/assets/config/default.yaml": "ba11c9ae686f1317a29bce114f0f9fc9",
31
  "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
32
  "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
33
  "canvaskit/canvaskit.js.symbols": "27361387bc24144b46a745f1afe92b50",
 
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
+ const RESOURCES = {"flutter_bootstrap.js": "cf2a0ca5fb914aaa2e35d2d3ca0090af",
7
  "version.json": "b5eaae4fc120710a3c35125322173615",
8
  "index.html": "2677c99782b3eb75aae7c757748ccacb",
9
  "/": "2677c99782b3eb75aae7c757748ccacb",
10
+ "main.dart.js": "fdb1b459d8d6f04d1b1cc8520b92fbeb",
11
  "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
12
  "aitube.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
 
23
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
24
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
25
  "assets/AssetManifest.bin": "6c597105edcadb9c676bdc998c88545a",
26
+ "assets/fonts/MaterialIcons-Regular.otf": "06b86454c633cc9510ad85ddc0523a91",
27
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
28
  "assets/assets/config/custom.yaml": "e5c0b238b6f217f1215fbc813f093656",
29
+ "assets/assets/config/aitube.yaml": "447b2def472f3181ce07560f54d374af",
30
+ "assets/assets/config/default.yaml": "d1304586fd15839a754f53dda3dd8a44",
31
  "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
32
  "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
33
  "canvaskit/canvaskit.js.symbols": "27361387bc24144b46a745f1afe92b50",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
- <script src="flutter_bootstrap.js?v=BUILD_TIMESTAMP" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
 
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
+ <script src="flutter_bootstrap.js?v=1746736495" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
build/web/main.dart.js CHANGED
The diff for this file is too large to render. See raw diff
 
lib/config/config.dart CHANGED
@@ -127,6 +127,10 @@ class Configuration {
127
  // Default negative prompt to avoid harmful content
128
  String get defaultNegativePrompt =>
129
  _config['video']['default_negative_prompt'] ?? 'gore, sex, blood, nudity, nude, porn, erotic, worst quality, deformed, distorted, disfigured, blurry, text, watermark';
 
 
 
 
130
 
131
  // Computed properties
132
 
 
127
  // Default negative prompt to avoid harmful content
128
  String get defaultNegativePrompt =>
129
  _config['video']['default_negative_prompt'] ?? 'gore, sex, blood, nudity, nude, porn, erotic, worst quality, deformed, distorted, disfigured, blurry, text, watermark';
130
+
131
+ // Simulation settings
132
+ int get simLoopFrequencyInSec =>
133
+ _config['simulation']?['sim_loop_frequency_in_sec'] ?? 0;
134
 
135
  // Computed properties
136
 
lib/models/video_result.dart CHANGED
@@ -15,6 +15,10 @@ class VideoResult {
15
  final bool useFixedSeed;
16
  final int seed;
17
 
 
 
 
 
18
  final int views;
19
  final String createdAt;
20
 
@@ -28,6 +32,8 @@ class VideoResult {
28
  this.isLatent = true,
29
  this.useFixedSeed = false,
30
  this.seed = 0,
 
 
31
  this.views = 0,
32
  String? createdAt,
33
  }) : id = id ?? const Uuid().v4(),
@@ -44,6 +50,8 @@ class VideoResult {
44
  isLatent: json['isLatent'] as bool? ?? true,
45
  useFixedSeed: json['useFixedSeed'] as bool? ?? false,
46
  seed: json['seed'] as int? ?? 0,
 
 
47
  views: json['views'] as int? ?? 0,
48
  createdAt: json['createdAt'] as String?,
49
  );
@@ -59,6 +67,8 @@ class VideoResult {
59
  'isLatent': isLatent,
60
  'useFixedSeed': useFixedSeed,
61
  'seed': seed,
 
 
62
  'views': views,
63
  'createdAt': createdAt,
64
  };
@@ -74,6 +84,8 @@ class VideoResult {
74
  bool? isLatent,
75
  bool? useFixedSeed,
76
  int? seed,
 
 
77
  int? views,
78
  String? createdAt,
79
  }) {
@@ -87,6 +99,8 @@ class VideoResult {
87
  isLatent: isLatent ?? this.isLatent,
88
  useFixedSeed: useFixedSeed ?? this.useFixedSeed,
89
  seed: seed ?? this.seed,
 
 
90
  views: views ?? this.views,
91
  createdAt: createdAt ?? this.createdAt,
92
  );
 
15
  final bool useFixedSeed;
16
  final int seed;
17
 
18
+ // Evolved description fields for dynamic narrative
19
+ final String evolvedDescription;
20
+ final String condensedHistory;
21
+
22
  final int views;
23
  final String createdAt;
24
 
 
32
  this.isLatent = true,
33
  this.useFixedSeed = false,
34
  this.seed = 0,
35
+ this.evolvedDescription = '',
36
+ this.condensedHistory = '',
37
  this.views = 0,
38
  String? createdAt,
39
  }) : id = id ?? const Uuid().v4(),
 
50
  isLatent: json['isLatent'] as bool? ?? true,
51
  useFixedSeed: json['useFixedSeed'] as bool? ?? false,
52
  seed: json['seed'] as int? ?? 0,
53
+ evolvedDescription: json['evolvedDescription'] as String? ?? '',
54
+ condensedHistory: json['condensedHistory'] as String? ?? '',
55
  views: json['views'] as int? ?? 0,
56
  createdAt: json['createdAt'] as String?,
57
  );
 
67
  'isLatent': isLatent,
68
  'useFixedSeed': useFixedSeed,
69
  'seed': seed,
70
+ 'evolvedDescription': evolvedDescription,
71
+ 'condensedHistory': condensedHistory,
72
  'views': views,
73
  'createdAt': createdAt,
74
  };
 
84
  bool? isLatent,
85
  bool? useFixedSeed,
86
  int? seed,
87
+ String? evolvedDescription,
88
+ String? condensedHistory,
89
  int? views,
90
  String? createdAt,
91
  }) {
 
99
  isLatent: isLatent ?? this.isLatent,
100
  useFixedSeed: useFixedSeed ?? this.useFixedSeed,
101
  seed: seed ?? this.seed,
102
+ evolvedDescription: evolvedDescription ?? this.evolvedDescription,
103
+ condensedHistory: condensedHistory ?? this.condensedHistory,
104
  views: views ?? this.views,
105
  createdAt: createdAt ?? this.createdAt,
106
  );
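Both new fields default to empty strings in fromJson, so payloads produced before this commit still parse. A sketch of that part of the JSON contract (Python used for illustration; the defaults mirror the Dart fallbacks):

# Sketch: old payloads without the new fields still round-trip cleanly
old_payload = {"title": "Deep Sea Documentary", "seed": 0}

evolved = old_payload.get("evolvedDescription", "")  # '' like the Dart model
history = old_payload.get("condensedHistory", "")
assert (evolved, history) == ("", "")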
lib/screens/home_screen.dart CHANGED
@@ -449,9 +449,12 @@ class _HomeScreenState extends State<HomeScreen> {
449
  ? Center(
450
  child: Text(
451
  _isSearching
452
- ? 'Generating videos...'
453
- : 'Start by typing a description of the video you want to generate',
454
- style: const TextStyle(color: AiTubeColors.onSurfaceVariant),
 
 
 
455
  textAlign: TextAlign.center,
456
  ),
457
  )
 
449
  ? Center(
450
  child: Text(
451
  _isSearching
452
+ ? 'Hallucinating search results using AI...'
453
+ : 'Results are generated on demand, videos rendered on the fly.',
454
+ style: const TextStyle(
455
+ color: AiTubeColors.onSurfaceVariant,
456
+ fontSize: 20
457
+ ),
458
  textAlign: TextAlign.center,
459
  ),
460
  )
lib/screens/video_screen.dart CHANGED
@@ -436,7 +436,7 @@ class _VideoScreenState extends State<VideoScreen> {
436
  if (Configuration.instance.showChatInVideoView) ...[
437
  const SizedBox(width: 16),
438
  Padding(
439
- padding: const EdgeInsets.only(right: 16),
440
  child: ChatWidget(videoId: widget.video.id),
441
  ),
442
  ],
@@ -451,7 +451,9 @@ class _VideoScreenState extends State<VideoScreen> {
451
  const SizedBox(height: 16),
452
  Expanded(
453
  child: Padding(
454
- padding: const EdgeInsets.symmetric(horizontal: 16),
 
 
455
  child: ChatWidget(
456
  videoId: widget.video.id,
457
  isCompact: true,
@@ -505,13 +507,14 @@ class _VideoScreenState extends State<VideoScreen> {
505
  style: Theme.of(context).textTheme.headlineSmall?.copyWith(
506
  color: AiTubeColors.onBackground,
507
  fontWeight: FontWeight.bold,
 
508
  ),
509
  ),
510
  ),
511
  IconButton(
512
  icon: const Icon(Icons.share, color: AiTubeColors.primary),
513
  onPressed: _shareVideo,
514
- tooltip: 'Share prompt',
515
  ),
516
  ],
517
  ),
@@ -523,29 +526,7 @@ class _VideoScreenState extends State<VideoScreen> {
523
  Column(
524
  crossAxisAlignment: CrossAxisAlignment.start,
525
  children: [
526
- // Tags
527
- if (_videoData.tags.isNotEmpty) ...[
528
- Wrap(
529
- spacing: 8,
530
- runSpacing: 8,
531
- children: _videoData.tags.map((tag) => Chip(
532
- label: Text(tag),
533
- backgroundColor: AiTubeColors.surface,
534
- labelStyle: const TextStyle(color: AiTubeColors.onSurface),
535
- )).toList(),
536
- ),
537
- const SizedBox(height: 16),
538
- ],
539
-
540
  // Description Section
541
- const Text(
542
- 'Description',
543
- style: TextStyle(
544
- color: AiTubeColors.onBackground,
545
- fontWeight: FontWeight.bold,
546
- fontSize: 18,
547
- ),
548
- ),
549
  const SizedBox(height: 8),
550
  Text(
551
  _videoData.description,
@@ -554,6 +535,7 @@ class _VideoScreenState extends State<VideoScreen> {
554
  height: 1.5,
555
  ),
556
  ),
 
557
  ],
558
  ),
559
  ],
 
436
  if (Configuration.instance.showChatInVideoView) ...[
437
  const SizedBox(width: 16),
438
  Padding(
439
+ padding: const EdgeInsets.only(left: 0, top: 16, right: 16, bottom: 4),
440
  child: ChatWidget(videoId: widget.video.id),
441
  ),
442
  ],
 
451
  const SizedBox(height: 16),
452
  Expanded(
453
  child: Padding(
454
+ padding: const EdgeInsets.symmetric(
455
+ horizontal: 16
456
+ ),
457
  child: ChatWidget(
458
  videoId: widget.video.id,
459
  isCompact: true,
 
507
  style: Theme.of(context).textTheme.headlineSmall?.copyWith(
508
  color: AiTubeColors.onBackground,
509
  fontWeight: FontWeight.bold,
510
+ fontSize: 18
511
  ),
512
  ),
513
  ),
514
  IconButton(
515
  icon: const Icon(Icons.share, color: AiTubeColors.primary),
516
  onPressed: _shareVideo,
517
+ tooltip: 'Share this creation',
518
  ),
519
  ],
520
  ),
 
526
  Column(
527
  crossAxisAlignment: CrossAxisAlignment.start,
528
  children: [
 
529
  // Description Section
 
530
  const SizedBox(height: 8),
531
  Text(
532
  _videoData.description,
 
535
  height: 1.5,
536
  ),
537
  ),
538
+ const SizedBox(height: 8),
539
  ],
540
  ),
541
  ],
lib/services/chat_service.dart CHANGED
@@ -116,6 +116,9 @@ class ChatService {
116
  color: _userColor,
117
  );
118
 
 
 
 
119
  debugPrint('ChatService: Sending message via WebSocket...');
120
  await _websocketService.sendChatMessage(message);
121
  debugPrint('ChatService: Message sent successfully');
@@ -146,11 +149,24 @@ class ChatService {
146
  }
147
  }
148
 
149
- void dispose() {
 
 
 
150
  if (_currentRoomId != null) {
151
- leaveRoom(_currentRoomId!);
 
 
 
 
152
  }
153
- _chatController.close();
 
 
 
 
 
154
  _isInitialized = false;
 
155
  }
156
  }
 
116
  color: _userColor,
117
  );
118
 
119
+ // Add message to the local stream before sending to avoid duplicates
120
+ _chatController.add(message);
121
+
122
  debugPrint('ChatService: Sending message via WebSocket...');
123
  await _websocketService.sendChatMessage(message);
124
  debugPrint('ChatService: Message sent successfully');
 
149
  }
150
  }
151
 
152
+ // This method is only for application shutdown
153
+ // Individual widgets should use leaveRoom instead
154
+ Future<void> dispose() async {
155
+ // Properly leave current room first if connected
156
  if (_currentRoomId != null) {
157
+ try {
158
+ await leaveRoom(_currentRoomId!);
159
+ } catch (e) {
160
+ debugPrint('ChatService: Error leaving room during disposal: $e');
161
+ }
162
  }
163
+
164
+ // Only close the controller if we're truly shutting down
165
+ if (!_chatController.isClosed) {
166
+ _chatController.close();
167
+ }
168
+
169
  _isInitialized = false;
170
+ debugPrint('ChatService: Successfully disposed');
171
  }
172
  }
lib/services/clip_queue/clip_queue_manager.dart CHANGED
@@ -1,12 +1,15 @@
1
  // lib/services/clip_queue/clip_queue_manager.dart
2
 
3
  import 'dart:async';
 
4
  import 'package:aitube2/config/config.dart';
5
  import 'package:flutter/foundation.dart';
6
  import 'package:collection/collection.dart';
7
  import '../../models/video_result.dart';
8
  import '../../models/video_orientation.dart';
 
9
  import '../websocket_api_service.dart';
 
10
  import '../../utils/seed.dart';
11
  import 'clip_states.dart';
12
  import 'video_clip.dart';
@@ -16,7 +19,7 @@ import 'clip_generation_handler.dart';
16
  /// Manages a queue of video clips for generation and playback
17
  class ClipQueueManager {
18
  /// The video for which clips are being generated
19
- final VideoResult video;
20
 
21
  /// WebSocket service for API communication
22
  final WebSocketApiService _websocketService;
@@ -36,9 +39,18 @@ class ClipQueueManager {
36
  /// Timer for checking the buffer state
37
  Timer? _bufferCheckTimer;
38
 
 
 
 
 
 
 
39
  /// Whether the manager is disposed
40
  bool _isDisposed = false;
41
 
 
 
 
42
  /// Stats logger
43
  final QueueStatsLogger _logger = QueueStatsLogger();
44
 
@@ -47,6 +59,12 @@ class ClipQueueManager {
47
 
48
  /// ID of the video being managed
49
  final String videoId;
 
 
 
 
 
 
50
 
51
  /// Constructor
52
  ClipQueueManager({
@@ -61,6 +79,30 @@ class ClipQueueManager {
61
  activeGenerations: _activeGenerations,
62
  onQueueUpdated: onQueueUpdated,
63
  );
 
64
  }
65
 
66
  /// Whether a new generation can be started
@@ -107,6 +149,10 @@ class ClipQueueManager {
107
  );
108
  _clipBuffer.clear();
109
 
 
 
 
 
110
  // Set initial orientation
111
  _currentOrientation = orientation ?? getOrientationFromDimensions(
112
  Configuration.instance.originalClipWidth,
@@ -130,6 +176,7 @@ class ClipQueueManager {
130
  if (_isDisposed) return;
131
 
132
  _startBufferCheck();
 
133
  await _fillBuffer();
134
  ClipQueueConstants.logEvent('Initialization complete. Buffer size: ${_clipBuffer.length}');
135
  printQueueState();
@@ -160,6 +207,127 @@ class ClipQueueManager {
160
  );
161
  ClipQueueConstants.logEvent('Started buffer check timer');
162
  }
 
163
 
164
  /// Mark a specific clip as played
165
  void markClipAsPlayed(String clipId) {
@@ -193,8 +361,16 @@ class ClipQueueManager {
193
 
194
  // First ensure we have the correct buffer size
195
  while (_clipBuffer.length < Configuration.instance.renderQueueBufferSize) {
 
 
 
 
 
 
 
 
196
  final newClip = VideoClip(
197
- prompt: "${video.title}\n${video.description}",
198
  seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
199
  orientation: _currentOrientation,
200
  );
@@ -316,9 +492,18 @@ class ClipQueueManager {
316
  _clipBuffer.remove(clip);
317
  _clipHistory.add(clip);
318
 
 
 
 
 
 
 
 
 
 
319
  // Add a new pending clip with current orientation
320
  final newClip = VideoClip(
321
- prompt: "${video.title}\n${video.description}",
322
  seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
323
  orientation: _currentOrientation,
324
  );
@@ -409,6 +594,24 @@ class ClipQueueManager {
409
  // Notify listeners
410
  onQueueUpdated?.call();
411
  }
 
412
 
413
  /// Print the current state of the queue
414
  void printQueueState() {
@@ -436,6 +639,7 @@ class ClipQueueManager {
436
 
437
  // Cancel all timers first
438
  _bufferCheckTimer?.cancel();
 
439
 
440
  // Complete any pending generation completers
441
  for (var clip in _clipBuffer) {
 
1
  // lib/services/clip_queue/clip_queue_manager.dart
2
 
3
  import 'dart:async';
4
+ import 'dart:math';
5
  import 'package:aitube2/config/config.dart';
6
  import 'package:flutter/foundation.dart';
7
  import 'package:collection/collection.dart';
8
  import '../../models/video_result.dart';
9
  import '../../models/video_orientation.dart';
10
+ import '../../models/chat_message.dart';
11
  import '../websocket_api_service.dart';
12
+ import '../chat_service.dart';
13
  import '../../utils/seed.dart';
14
  import 'clip_states.dart';
15
  import 'video_clip.dart';
 
19
  /// Manages a queue of video clips for generation and playback
20
  class ClipQueueManager {
21
  /// The video for which clips are being generated
22
+ VideoResult video;
23
 
24
  /// WebSocket service for API communication
25
  final WebSocketApiService _websocketService;
 
39
  /// Timer for checking the buffer state
40
  Timer? _bufferCheckTimer;
41
 
42
+ /// Timer for evolving the description
43
+ Timer? _descriptionEvolutionTimer;
44
+
45
+ /// Last time the description was evolved
46
+ DateTime _lastDescriptionEvolutionTime = DateTime.now();
47
+
48
  /// Whether the manager is disposed
49
  bool _isDisposed = false;
50
 
51
+ /// Whether the simulation is paused (controlled by video playback)
52
+ bool _isSimulationPaused = false;
53
+
54
  /// Stats logger
55
  final QueueStatsLogger _logger = QueueStatsLogger();
56
 
 
59
 
60
  /// ID of the video being managed
61
  final String videoId;
62
+
63
+ /// Evolution counter for tracking how many times we've evolved the description
64
+ int _evolutionCounter = 0;
65
+
66
+ /// Recent chat messages to include in description evolution
67
+ final List<ChatMessage> _recentChatMessages = [];
68
 
69
  /// Constructor
70
  ClipQueueManager({
 
79
  activeGenerations: _activeGenerations,
80
  onQueueUpdated: onQueueUpdated,
81
  );
82
+
83
+ // Start listening to chat messages
84
+ final chatService = ChatService();
85
+ chatService.initialize().then((_) {
86
+ chatService.joinRoom(videoId).then((_) {
87
+ chatService.chatStream.listen(_addChatMessage);
88
+ }).catchError((e) {
89
+ debugPrint('ClipQueueManager: Error joining chat room: $e');
90
+ });
91
+ }).catchError((e) {
92
+ debugPrint('ClipQueueManager: Error initializing chat service: $e');
93
+ });
94
+ }
95
+
96
+ /// Add a chat message to the recent messages list
97
+ void _addChatMessage(ChatMessage message) {
98
+ if (message.videoId == videoId) {
99
+ _recentChatMessages.add(message);
100
+ // Keep only the 10 most recent messages
101
+ if (_recentChatMessages.length > 10) {
102
+ _recentChatMessages.removeAt(0);
103
+ }
104
+ ClipQueueConstants.logEvent('Added chat message: ${message.content.substring(0, min(20, message.content.length))}...');
105
+ }
106
  }
107
 
108
  /// Whether a new generation can be started
 
149
  );
150
  _clipBuffer.clear();
151
 
152
+ // Reset evolution counter and last evolution time
153
+ _evolutionCounter = 0;
154
+ _lastDescriptionEvolutionTime = DateTime.now();
155
+
156
  // Set initial orientation
157
  _currentOrientation = orientation ?? getOrientationFromDimensions(
158
  Configuration.instance.originalClipWidth,
 
176
  if (_isDisposed) return;
177
 
178
  _startBufferCheck();
179
+ _startDescriptionEvolution();
180
  await _fillBuffer();
181
  ClipQueueConstants.logEvent('Initialization complete. Buffer size: ${_clipBuffer.length}');
182
  printQueueState();
 
207
  );
208
  ClipQueueConstants.logEvent('Started buffer check timer');
209
  }
210
+
211
+ /// Start the simulation timer
212
+ void _startDescriptionEvolution() {
213
+ // Cancel any existing timer
214
+ _descriptionEvolutionTimer?.cancel();
215
+
216
+ // Only start if simulation frequency is greater than 0
217
+ if (Configuration.instance.simLoopFrequencyInSec <= 0) {
218
+ ClipQueueConstants.logEvent('Simulation disabled (frequency is 0)');
219
+ return;
220
+ }
221
+
222
+ // Adaptive check interval - less frequent checks to reduce overhead
223
+ final checkInterval = max(3, Configuration.instance.simLoopFrequencyInSec ~/ 3);
224
+
225
+ ClipQueueConstants.logEvent('Starting simulation with check interval of $checkInterval seconds');
226
+
227
+ // Check periodically if it's time to simulate the video
228
+ _descriptionEvolutionTimer = Timer.periodic(
229
+ Duration(seconds: checkInterval),
230
+ (timer) async {
231
+ if (_isDisposed) return;
232
+
233
+ // Skip if simulation is paused (due to video playback being paused)
234
+ if (_isSimulationPaused) {
235
+ ClipQueueConstants.logEvent('Skipping simulation because it is paused');
236
+ return;
237
+ }
238
+
239
+ // Check if we're currently generating a video - if so, delay simulation
240
+ final isGenerating = _activeGenerations.isNotEmpty;
241
+ if (isGenerating) {
242
+ ClipQueueConstants.logEvent('Delaying simulation due to active generations');
243
+ return;
244
+ }
245
+
246
+ // Calculate time since last simulation
247
+ final now = DateTime.now();
248
+ final duration = now.difference(_lastDescriptionEvolutionTime);
249
+
250
+ // If we've waited long enough, simulate the video
251
+ if (duration.inSeconds >= Configuration.instance.simLoopFrequencyInSec) {
252
+ ClipQueueConstants.logEvent('Triggering simulation after ${duration.inSeconds} seconds');
253
+ await _evolveDescription();
254
+ _lastDescriptionEvolutionTime = now;
255
+ }
256
+ },
257
+ );
258
+ ClipQueueConstants.logEvent('Started simulation timer');
259
+ }
260
+
261
+ /// Simulate the video by evolving the description using the LLM
262
+ Future<void> _evolveDescription() async {
263
+ if (!_websocketService.isConnected) {
264
+ ClipQueueConstants.logEvent('Cannot simulate video: websocket not connected');
265
+ return;
266
+ }
267
+
268
+ int retryCount = 0;
269
+ const maxRetries = 2;
270
+
271
+ // Function to get chat message string
272
+ String getChatMessagesString() {
273
+ if (_recentChatMessages.isEmpty) return '';
274
+
275
+ return _recentChatMessages.map((msg) =>
276
+ "${msg.username}: ${msg.content}"
277
+ ).join("\n");
278
+ }
279
+
280
+ while (retryCount <= maxRetries) {
281
+ try {
282
+ // Format recent chat messages as a string for the simulation prompt
283
+ String chatMessagesString = getChatMessagesString();
284
+ if (chatMessagesString.isNotEmpty) {
285
+ ClipQueueConstants.logEvent('Including ${_recentChatMessages.length} chat messages in simulation');
286
+ }
287
+
288
+ // Use the WebSocketService to simulate the video
289
+ final result = await _websocketService.simulate(
290
+ videoId: video.id,
291
+ originalTitle: video.title,
292
+ originalDescription: video.description,
293
+ currentDescription: video.evolvedDescription.isEmpty ? video.description : video.evolvedDescription,
294
+ condensedHistory: video.condensedHistory,
295
+ evolutionCount: _evolutionCounter,
296
+ chatMessages: chatMessagesString,
297
+ );
298
+
299
+ // Update the video with the evolved description
300
+ video = video.copyWith(
301
+ evolvedDescription: result['evolved_description'],
302
+ condensedHistory: result['condensed_history'],
303
+ );
304
+
305
+ _evolutionCounter++;
306
+ ClipQueueConstants.logEvent('Video simulated (iteration $_evolutionCounter)');
307
+ onQueueUpdated?.call();
308
+
309
+ // Success, exit retry loop
310
+ break;
311
+ } catch (e) {
312
+ retryCount++;
313
+ ClipQueueConstants.logEvent('Error simulating video attempt $retryCount/$maxRetries: $e');
314
+
315
+ if (retryCount <= maxRetries) {
316
+ // Wait before retrying with exponential backoff
317
+ final delay = Duration(seconds: 1 << retryCount);
318
+ ClipQueueConstants.logEvent('Retrying simulation in ${delay.inSeconds} seconds...');
319
+ await Future.delayed(delay);
320
+ } else {
321
+ ClipQueueConstants.logEvent('Failed to simulate video after $maxRetries attempts');
322
+
323
+ // If we've been successful before but failed now, we can continue using the last evolved description
324
+ if (_evolutionCounter > 0) {
325
+ ClipQueueConstants.logEvent('Continuing with previous description');
326
+ }
327
+ }
328
+ }
329
+ }
330
+ }
331
 
332
  /// Mark a specific clip as played
333
  void markClipAsPlayed(String clipId) {
 
361
 
362
  // First ensure we have the correct buffer size
363
  while (_clipBuffer.length < Configuration.instance.renderQueueBufferSize) {
364
+ // Determine which description to use for the prompt
365
+ String descriptionToUse = video.description;
366
+
367
+ // If we have an evolved description, use that instead
368
+ if (video.evolvedDescription.isNotEmpty) {
369
+ descriptionToUse = video.evolvedDescription;
370
+ }
371
+
372
  final newClip = VideoClip(
373
+ prompt: "${video.title}\n${descriptionToUse}",
374
  seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
375
  orientation: _currentOrientation,
376
  );
 
492
  _clipBuffer.remove(clip);
493
  _clipHistory.add(clip);
494
 
495
+ // Determine which description to use for the prompt
496
+ String descriptionToUse = video.description;
497
+
498
+ // If we have an evolved description, use that instead
499
+ if (video.evolvedDescription.isNotEmpty) {
500
+ descriptionToUse = video.evolvedDescription;
501
+ ClipQueueConstants.logEvent('Using evolved description for new clip (evolution #$_evolutionCounter)');
502
+ }
503
+
504
  // Add a new pending clip with current orientation
505
  final newClip = VideoClip(
506
+ prompt: "${video.title}\n${descriptionToUse}",
507
  seed: video.useFixedSeed && video.seed > 0 ? video.seed : generateSeed(),
508
  orientation: _currentOrientation,
509
  );
 
594
  // Notify listeners
595
  onQueueUpdated?.call();
596
  }
597
+
598
+ /// Set the simulation pause state based on video playback
599
+ void setSimulationPaused(bool isPaused) {
600
+ if (_isSimulationPaused == isPaused) return;
601
+
602
+ _isSimulationPaused = isPaused;
603
+ ClipQueueConstants.logEvent(
604
+ isPaused
605
+ ? 'Simulation paused (video playback paused)'
606
+ : 'Simulation resumed (video playback resumed)'
607
+ );
608
+
609
+ // If we're resuming after a pause, update the last evolution time
610
+ // to avoid immediate evolution after resuming
611
+ if (!isPaused) {
612
+ _lastDescriptionEvolutionTime = DateTime.now();
613
+ }
614
+ }
615
 
616
  /// Print the current state of the queue
617
  void printQueueState() {
 
639
 
640
  // Cancel all timers first
641
  _bufferCheckTimer?.cancel();
642
+ _descriptionEvolutionTimer?.cancel();
643
 
644
  // Complete any pending generation completers
645
  for (var clip in _clipBuffer) {
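The retry loop in _evolveDescription backs off exponentially: with maxRetries = 2, the 1 << retryCount delay yields 2s and then 4s before giving up. A Python sketch of the same schedule (the Dart code logs and continues instead of raising):

# Sketch of the simulation retry schedule used by _evolveDescription
import asyncio

async def simulate_with_retry(call, max_retries: int = 2):
    for attempt in range(max_retries + 1):
        try:
            return await call()
        except Exception:
            if attempt == max_retries:
                raise  # out of retries
            await asyncio.sleep(1 << (attempt + 1))  # 2s, then 4s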
lib/services/clip_queue/queue_stats_logger.dart CHANGED
@@ -52,10 +52,7 @@ class QueueStatsLogger {
52
  !_areStatesEqual(_lastLoggedState!, currentState) ||
53
  _shouldLogDueToTimeout()) {
54
 
55
- debugPrint('\n=== Queue State Change [$trigger] ===');
56
- debugPrint('Ready: ${currentState['readyClips']}');
57
- debugPrint('Playing: ${currentState['playingClips']}');
58
- debugPrint('Generating: ${currentState['generatingClips']}');
59
 
60
  /*
61
  debugPrint('Pending: ${currentState['pendingClips']}');
 
52
  !_areStatesEqual(_lastLoggedState!, currentState) ||
53
  _shouldLogDueToTimeout()) {
54
 
55
+ // debugPrint('Queue State Change [$trigger] => Ready: ${currentState['readyClips']}, Playing: ${currentState['playingClips']}, Generating: ${currentState['generatingClips']}');
 
 
 
56
 
57
  /*
58
  debugPrint('Pending: ${currentState['pendingClips']}');
lib/services/websocket_api_service.dart CHANGED
@@ -859,6 +859,11 @@ class WebSocketApiService {
859
  }
860
 
861
  Future<bool> sendChatMessage(ChatMessage message) async {
 
 
 
 
 
862
  if (!_initialized) {
863
  debugPrint('WebSocketApiService: Initializing before sending message...');
864
  await initialize();
@@ -867,6 +872,12 @@ class WebSocketApiService {
867
  try {
868
  debugPrint('WebSocketApiService: Sending chat message...');
869
 
 
 
 
 
 
 
870
  final response = await _sendRequest(
871
  WebSocketRequest(
872
  action: 'chat_message',
@@ -897,7 +908,7 @@ class WebSocketApiService {
897
  final action = data['action'] as String?;
898
  final requestId = data['requestId'] as String?;
899
 
900
- // debugPrint('WebSocketApiService: Received message for action: $action, requestId: $requestId');
901
 
902
  // Update user role if present in response (from heartbeat or get_user_role)
903
  if (data['user_role'] != null) {
@@ -912,10 +923,8 @@ class WebSocketApiService {
912
  if (requestId != null && _pendingRequests.containsKey(requestId)) {
913
  if (action == 'chat_message') {
914
  debugPrint('WebSocketApiService: Processing chat message response');
915
- // Extract the message data for chat messages
916
- if (data['success'] == true && data['message'] != null) {
917
- _handleChatMessage(data['message'] as Map<String, dynamic>);
918
- }
919
  _pendingRequests[requestId]!.complete(data);
920
  } else if (action == 'join_chat') {
921
  debugPrint('WebSocketApiService: Processing join chat response');
@@ -948,6 +957,11 @@ class WebSocketApiService {
948
  }
949
 
950
  void _handleChatMessage(Map<String, dynamic> data) {
 
 
 
 
 
951
  try {
952
  // Log the exact data we're trying to parse
953
  debugPrint('Parsing chat message data: ${json.encode(data)}');
@@ -964,7 +978,13 @@ class WebSocketApiService {
964
 
965
  final message = ChatMessage.fromJson(data);
966
  debugPrint('Successfully parsed message: ${message.toString()}');
967
- _chatController.add(message);
 
 
 
 
 
 
968
  } catch (e, stackTrace) {
969
  debugPrint('Error handling chat message: $e');
970
  debugPrint('Stack trace: $stackTrace');
@@ -974,11 +994,22 @@ class WebSocketApiService {
974
 
975
 
976
  void _handleChatHistory(Map<String, dynamic> data) {
 
 
 
 
 
977
  try {
978
  if (data['messages'] == null) {
979
  debugPrint('No messages found in chat history');
980
  return;
981
  }
 
 
 
 
 
 
982
 
983
  final messages = (data['messages'] as List).map((m) {
984
  try {
@@ -992,8 +1023,13 @@ class WebSocketApiService {
992
 
993
  debugPrint('Processing ${messages.length} historical messages');
994
 
995
- for (final message in messages) {
996
- _chatController.add(message);
 
 
 
 
 
997
  }
998
  } catch (e, stackTrace) {
999
  debugPrint('Error handling chat history: $e');
@@ -1075,7 +1111,7 @@ class WebSocketApiService {
1075
 
1076
  try {
1077
  final requestData = request.toJson();
1078
- // debugPrint('WebSocketApiService: Sending request ${request.requestId} (${request.action}): ${json.encode(requestData)}');
1079
  _channel!.sink.add(json.encode(requestData));
1080
 
1081
  final response = await completer.future.timeout(
@@ -1191,6 +1227,59 @@ class WebSocketApiService {
1191
  return response['caption'] as String;
1192
  }
1193
 
 
 
1194
 
1195
  // Additional utility methods
1196
  Future<void> waitForConnection() async {
 
859
  }
860
 
861
  Future<bool> sendChatMessage(ChatMessage message) async {
862
+ if (_disposed) {
863
+ debugPrint('WebSocketApiService: Cannot send message, service is disposed');
864
+ throw Exception('WebSocketApiService is disposed');
865
+ }
866
+
867
  if (!_initialized) {
868
  debugPrint('WebSocketApiService: Initializing before sending message...');
869
  await initialize();
 
872
  try {
873
  debugPrint('WebSocketApiService: Sending chat message...');
874
 
875
+ // Check if the chatController is still open to avoid "Cannot add events after calling close" error
876
+ if (_chatController.isClosed) {
877
+ debugPrint('WebSocketApiService: Chat controller is closed, cannot process messages');
878
+ throw Exception('Chat controller is closed');
879
+ }
880
+
881
  final response = await _sendRequest(
882
  WebSocketRequest(
883
  action: 'chat_message',
 
908
  final action = data['action'] as String?;
909
  final requestId = data['requestId'] as String?;
910
 
911
+ debugPrint('WebSocketApiService: Received message for action: $action, requestId: $requestId');
912
 
913
  // Update user role if present in response (from heartbeat or get_user_role)
914
  if (data['user_role'] != null) {
 
923
  if (requestId != null && _pendingRequests.containsKey(requestId)) {
924
  if (action == 'chat_message') {
925
  debugPrint('WebSocketApiService: Processing chat message response');
926
+ // Don't process the message data from our own request since we already added it locally
927
+ // We only need to complete the request to signal success/failure
 
 
928
  _pendingRequests[requestId]!.complete(data);
929
  } else if (action == 'join_chat') {
930
  debugPrint('WebSocketApiService: Processing join chat response');
 
957
  }
958
 
959
  void _handleChatMessage(Map<String, dynamic> data) {
960
+ if (_disposed) {
961
+ debugPrint('WebSocketApiService: Skipping message handling, service is disposed');
962
+ return;
963
+ }
964
+
965
  try {
966
  // Log the exact data we're trying to parse
967
  debugPrint('Parsing chat message data: ${json.encode(data)}');
 
978
 
979
  final message = ChatMessage.fromJson(data);
980
  debugPrint('Successfully parsed message: ${message.toString()}');
981
+
982
+ // Only add to stream if it's still open
983
+ if (!_chatController.isClosed) {
984
+ _chatController.add(message);
985
+ } else {
986
+ debugPrint('WebSocketApiService: Chat controller is closed, cannot add message');
987
+ }
988
  } catch (e, stackTrace) {
989
  debugPrint('Error handling chat message: $e');
990
  debugPrint('Stack trace: $stackTrace');
 
994
 
995
 
996
  void _handleChatHistory(Map<String, dynamic> data) {
997
+ if (_disposed) {
998
+ debugPrint('WebSocketApiService: Skipping chat history handling, service is disposed');
999
+ return;
1000
+ }
1001
+
1002
  try {
1003
  if (data['messages'] == null) {
1004
  debugPrint('No messages found in chat history');
1005
  return;
1006
  }
1007
+
1008
+ // Check if chat controller is still open
1009
+ if (_chatController.isClosed) {
1010
+ debugPrint('WebSocketApiService: Chat controller is closed, cannot process chat history');
1011
+ return;
1012
+ }
1013
 
1014
  final messages = (data['messages'] as List).map((m) {
1015
  try {
 
1023
 
1024
  debugPrint('Processing ${messages.length} historical messages');
1025
 
1026
+ // Check again before adding messages in case it was closed during processing
1027
+ if (!_chatController.isClosed) {
1028
+ for (final message in messages) {
1029
+ _chatController.add(message);
1030
+ }
1031
+ } else {
1032
+ debugPrint('WebSocketApiService: Chat controller was closed during processing');
1033
  }
1034
  } catch (e, stackTrace) {
1035
  debugPrint('Error handling chat history: $e');
 
1111
 
1112
  try {
1113
  final requestData = request.toJson();
1114
+ debugPrint('WebSocketApiService: Sending request ${request.requestId} (${request.action}): ${json.encode(requestData)}');
1115
  _channel!.sink.add(json.encode(requestData));
1116
 
1117
  final response = await completer.future.timeout(
 
1227
  return response['caption'] as String;
1228
  }
1229
 
1230
+ /// Simulate a video by evolving its description to create a dynamic narrative
1231
+ Future<Map<String, String>> simulate({
1232
+ required String videoId,
1233
+ required String originalTitle,
1234
+ required String originalDescription,
1235
+ required String currentDescription,
1236
+ required String condensedHistory,
1237
+ int evolutionCount = 0,
1238
+ String chatMessages = '',
1239
+ }) async {
1240
+ // Skip if the API is not connected
1241
+ if (!isConnected) {
1242
+ debugPrint('WebSocketApiService: Cannot simulate video, not connected');
1243
+ return {
1244
+ 'evolved_description': currentDescription,
1245
+ 'condensed_history': condensedHistory
1246
+ };
1247
+ }
1248
+
1249
+ try {
1250
+ final response = await _sendRequest(
1251
+ WebSocketRequest(
1252
+ action: 'simulate',
1253
+ params: {
1254
+ 'video_id': videoId,
1255
+ 'original_title': originalTitle,
1256
+ 'original_description': originalDescription,
1257
+ 'current_description': currentDescription,
1258
+ 'condensed_history': condensedHistory,
1259
+ 'evolution_count': evolutionCount,
1260
+ 'chat_messages': chatMessages,
1261
+ },
1262
+ ),
1263
+ timeout: const Duration(seconds: 60),
1264
+ );
1265
+
1266
+ if (response['success'] != true) {
1267
+ throw Exception(response['error'] ?? 'Simulation failed');
1268
+ }
1269
+
1270
+ return {
1271
+ 'evolved_description': response['evolved_description'] as String? ?? currentDescription,
1272
+ 'condensed_history': response['condensed_history'] as String? ?? condensedHistory
1273
+ };
1274
+ } catch (e) {
1275
+ debugPrint('WebSocketApiService: Error simulating video: $e');
1276
+ return {
1277
+ 'evolved_description': currentDescription,
1278
+ 'condensed_history': condensedHistory
1279
+ };
1280
+ }
1281
+ }
1282
+
1283
 
1284
  // Additional utility methods
1285
  Future<void> waitForConnection() async {
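The new `simulate` method is deliberately failure-tolerant: when disconnected, on a non-success response, or on any exception it returns the inputs unchanged, so the clip queue can keep rendering with the current description. A usage sketch; the values and the way the service instance is obtained are illustrative:

```dart
/// Hypothetical call site for the new 'simulate' action.
Future<String> evolveDescriptionOnce(WebSocketApiService api) async {
  final result = await api.simulate(
    videoId: 'video-123', // made-up id
    originalTitle: 'F1 race with farming trucks',
    originalDescription: 'Farming trucks compete on an F1 circuit.',
    currentDescription: 'Farming trucks compete on an F1 circuit at dusk.',
    condensedHistory: '',
    evolutionCount: 3,
    chatMessages: 'viewer42: make it rain',
  );
  // Falls back to the inputs above if the API is unreachable or errors out.
  return result['evolved_description']!;
}
```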
lib/theme/colors.dart CHANGED
@@ -2,11 +2,12 @@
2
  import 'package:flutter/material.dart';
3
 
4
  class AiTubeColors {
5
- static const background = Color(0xFF0F0F0F);
6
- static const surface = Color(0xFF242424);
7
- static const surfaceVariant = Color(0xFF2D2D2D);
8
- static const primary = Colors.red;
9
- static const onBackground = Colors.white;
10
- static const onSurface = Colors.white;
 
11
  static const onSurfaceVariant = Colors.white70;
12
  }
 
2
  import 'package:flutter/material.dart';
3
 
4
  class AiTubeColors {
5
+ static const transparent = Color(0x00000000);
6
+ static const background = Color(0xFF171717);
7
+ static const surface = Color(0xFF272727);
8
+ static const surfaceVariant = Color(0xFF2B2B2B);
9
+ static const primary = Color.fromARGB(236, 214, 225, 4);
10
+ static const onBackground = Color.fromARGB(239, 255, 255, 255);
11
+ static const onSurface = Color.fromARGB(226, 255, 255, 255);
12
  static const onSurfaceVariant = Colors.white70;
13
  }
lib/widgets/ai_content_disclaimer.dart CHANGED
@@ -75,7 +75,7 @@ class AiContentDisclaimer extends StatelessWidget {
75
  child: LayoutBuilder(
76
  builder: (context, constraints) {
77
  // Scale text based on container width
78
- final baseSize = constraints.maxWidth / 40;
79
  final smallTextSize = baseSize * 0.7;
80
  final mediumTextSize = baseSize;
81
  final largeTextSize = baseSize * 1.1;
@@ -91,7 +91,7 @@ class AiContentDisclaimer extends StatelessWidget {
91
  textBaseline: TextBaseline.alphabetic,
92
  children: [
93
  Text(
94
- 'The following ',
95
  style: GoogleFonts.arimo(
96
  fontSize: smallTextSize,
97
  color: Colors.white,
@@ -108,7 +108,7 @@ class AiContentDisclaimer extends StatelessWidget {
108
  ),
109
  ),
110
  Text(
111
- 'footage',
112
  style: GoogleFonts.arimo(
113
  fontSize: mediumTextSize,
114
  color: Colors.white,
@@ -125,7 +125,7 @@ class AiContentDisclaimer extends StatelessWidget {
125
  ),
126
  ),
127
  Text(
128
- isInteractive ? ' will be ' : ' has been ',
129
  style: GoogleFonts.arimo(
130
  fontSize: smallTextSize,
131
  color: Colors.white,
@@ -142,7 +142,7 @@ class AiContentDisclaimer extends StatelessWidget {
142
  ),
143
  ),
144
  Text(
145
- 'synthesized',
146
  style: GoogleFonts.arimo(
147
  fontSize: mediumTextSize,
148
  color: Colors.white,
@@ -158,16 +158,8 @@ class AiContentDisclaimer extends StatelessWidget {
158
  ],
159
  ),
160
  ),
161
- ],
162
- ),
163
- const SizedBox(height: 18),
164
- Row(
165
- mainAxisAlignment: MainAxisAlignment.center,
166
- crossAxisAlignment: CrossAxisAlignment.baseline,
167
- textBaseline: TextBaseline.alphabetic,
168
- children: [
169
  Text(
170
- 'on the fly using a',
171
  style: GoogleFonts.arimo(
172
  fontSize: smallTextSize,
173
  color: Colors.white,
@@ -185,9 +177,10 @@ class AiContentDisclaimer extends StatelessWidget {
185
  ),
186
  ],
187
  ),
 
188
  const SizedBox(height: 18),
189
  Text(
190
- 'distilled video model',
191
  style: GoogleFonts.arimo(
192
  fontSize: largeTextSize,
193
  color: Colors.white,
@@ -204,23 +197,102 @@ class AiContentDisclaimer extends StatelessWidget {
204
  ),
205
  ),
206
  const SizedBox(height: 18),
207
- Text(
208
- 'and may contains visual glitches or hallucinations.',
209
- style: GoogleFonts.arimo(
210
- fontSize: smallTextSize,
211
- color: Colors.white,
212
- fontWeight: FontWeight.w500,
213
- letterSpacing: 1.2,
214
- height: 1.0,
215
- shadows: const [
216
- Shadow(
217
- offset: Offset(0, 2),
218
- blurRadius: 3.0,
219
- color: Color.fromRGBO(0, 0, 0, 0.3),
 
 
220
  ),
221
- ],
222
- ),
223
- textAlign: TextAlign.center,
 
 
 
 
224
  ),
225
  ],
226
  ),
 
75
  child: LayoutBuilder(
76
  builder: (context, constraints) {
77
  // Scale text based on container width
78
+ final baseSize = constraints.maxWidth / 35;
79
  final smallTextSize = baseSize * 0.7;
80
  final mediumTextSize = baseSize;
81
  final largeTextSize = baseSize * 1.1;
 
91
  textBaseline: TextBaseline.alphabetic,
92
  children: [
93
  Text(
94
+ 'THE FOLLOWING ',
95
  style: GoogleFonts.arimo(
96
  fontSize: smallTextSize,
97
  color: Colors.white,
 
108
  ),
109
  ),
110
  Text(
111
+ 'CONTENT',
112
  style: GoogleFonts.arimo(
113
  fontSize: mediumTextSize,
114
  color: Colors.white,
 
125
  ),
126
  ),
127
  Text(
128
+ isInteractive ? ' WILL BE ' : ' HAS BEEN ',
129
  style: GoogleFonts.arimo(
130
  fontSize: smallTextSize,
131
  color: Colors.white,
 
142
  ),
143
  ),
144
  Text(
145
+ 'SYNTHESIZED',
146
  style: GoogleFonts.arimo(
147
  fontSize: mediumTextSize,
148
  color: Colors.white,
 
158
  ],
159
  ),
160
  ),
 
 
161
  Text(
162
+ ' BY A',
163
  style: GoogleFonts.arimo(
164
  fontSize: smallTextSize,
165
  color: Colors.white,
 
177
  ),
178
  ],
179
  ),
180
+
181
  const SizedBox(height: 18),
182
  Text(
183
+ 'DISTILLED AI VIDEO MODEL',
184
  style: GoogleFonts.arimo(
185
  fontSize: largeTextSize,
186
  color: Colors.white,
 
197
  ),
198
  ),
199
  const SizedBox(height: 18),
200
+ Row(
201
+ mainAxisAlignment: MainAxisAlignment.center,
202
+ crossAxisAlignment: CrossAxisAlignment.baseline,
203
+ textBaseline: TextBaseline.alphabetic,
204
+ children: [
205
+ Text(
206
+ 'AND MAY CONTAIN',
207
+ style: GoogleFonts.arimo(
208
+ fontSize: smallTextSize,
209
+ color: Colors.white,
210
+ fontWeight: FontWeight.w500,
211
+ letterSpacing: 1.2,
212
+ height: 1.0,
213
+ shadows: const [
214
+ Shadow(
215
+ offset: Offset(0, 2),
216
+ blurRadius: 3.0,
217
+ color: Color.fromRGBO(0, 0, 0, 0.3),
218
+ ),
219
+ ],
220
  ),
221
+ textAlign: TextAlign.center,
222
+ ),
223
+ Text(
224
+ ' VISUAL GLITCHES',
225
+ style: GoogleFonts.arimo(
226
+ fontSize: mediumTextSize,
227
+ color: Colors.white,
228
+ fontWeight: FontWeight.w700,
229
+ letterSpacing: 1.2,
230
+ height: 1.0,
231
+ shadows: const [
232
+ Shadow(
233
+ offset: Offset(0, 2),
234
+ blurRadius: 3.0,
235
+ color: Color.fromRGBO(0, 0, 0, 0.3),
236
+ ),
237
+ ],
238
+ ),
239
+ textAlign: TextAlign.center,
240
+ ),
241
+ Text(
242
+ ' OR',
243
+ style: GoogleFonts.arimo(
244
+ fontSize: smallTextSize,
245
+ color: Colors.white,
246
+ fontWeight: FontWeight.w500,
247
+ letterSpacing: 1.2,
248
+ height: 1.0,
249
+ shadows: const [
250
+ Shadow(
251
+ offset: Offset(0, 2),
252
+ blurRadius: 3.0,
253
+ color: Color.fromRGBO(0, 0, 0, 0.3),
254
+ ),
255
+ ],
256
+ ),
257
+ textAlign: TextAlign.center,
258
+ ),
259
+ Text(
260
+ ' HALLUCINATIONS',
261
+ style: GoogleFonts.arimo(
262
+ fontSize: mediumTextSize,
263
+ color: Colors.white,
264
+ fontWeight: FontWeight.w700,
265
+ letterSpacing: 1.2,
266
+ height: 1.0,
267
+ shadows: const [
268
+ Shadow(
269
+ offset: Offset(0, 2),
270
+ blurRadius: 3.0,
271
+ color: Color.fromRGBO(0, 0, 0, 0.3),
272
+ ),
273
+ ],
274
+ ),
275
+ textAlign: TextAlign.center,
276
+ ),
277
+ Text(
278
+ '.',
279
+ style: GoogleFonts.arimo(
280
+ fontSize: smallTextSize,
281
+ color: Colors.white,
282
+ fontWeight: FontWeight.w500,
283
+ letterSpacing: 1.2,
284
+ height: 1.0,
285
+ shadows: const [
286
+ Shadow(
287
+ offset: Offset(0, 2),
288
+ blurRadius: 3.0,
289
+ color: Color.fromRGBO(0, 0, 0, 0.3),
290
+ ),
291
+ ],
292
+ ),
293
+ textAlign: TextAlign.center,
294
+ ),
295
+ ],
296
  ),
297
  ],
298
  ),
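Every word in the rebuilt disclaimer carries the same `GoogleFonts.arimo` style, varying only in size and weight; if this screen grows further, a small helper would remove the repetition. A refactoring sketch, not part of this commit:

```dart
import 'package:flutter/material.dart';
import 'package:google_fonts/google_fonts.dart';

/// Hypothetical helper: one place for the shared shadow and letter spacing.
Text disclaimerWord(String text, double fontSize,
    {FontWeight weight = FontWeight.w500}) {
  return Text(
    text,
    textAlign: TextAlign.center,
    style: GoogleFonts.arimo(
      fontSize: fontSize,
      color: Colors.white,
      fontWeight: weight,
      letterSpacing: 1.2,
      height: 1.0,
      shadows: const [
        Shadow(
          offset: Offset(0, 2),
          blurRadius: 3.0,
          color: Color.fromRGBO(0, 0, 0, 0.3),
        ),
      ],
    ),
  );
}
```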
lib/widgets/chat_widget.dart CHANGED
@@ -25,6 +25,8 @@ class _ChatWidgetState extends State<ChatWidget> {
25
  final _messageController = TextEditingController();
26
  final _scrollController = ScrollController();
27
  final _messages = <ChatMessage>[];
 
 
28
  bool _isLoading = true;
29
  bool _isSending = false;
30
  String? _error;
@@ -120,11 +122,19 @@ class _ChatWidgetState extends State<ChatWidget> {
120
  void _onNewMessage(ChatMessage message) {
121
  if (!mounted) return;
122
 
 
 
 
 
 
 
123
  setState(() {
124
  _messages.add(message);
 
125
  // Keep only last 100 messages
126
  if (_messages.length > 100) {
127
- _messages.removeAt(0);
 
128
  }
129
  });
130
 
@@ -179,7 +189,7 @@ class _ChatWidgetState extends State<ChatWidget> {
179
  child: Text(
180
  message.username.substring(0, 1).toUpperCase(),
181
  style: const TextStyle(
182
- color: Colors.white,
183
  fontWeight: FontWeight.bold,
184
  ),
185
  ),
@@ -225,7 +235,7 @@ class _ChatWidgetState extends State<ChatWidget> {
225
  return Container(
226
  padding: const EdgeInsets.all(8),
227
  decoration: const BoxDecoration(
228
- color: AiTubeColors.surface,
229
  border: Border(
230
  top: BorderSide(
231
  color: AiTubeColors.surfaceVariant,
@@ -239,18 +249,48 @@ class _ChatWidgetState extends State<ChatWidget> {
239
  child: TextField(
240
  controller: _messageController,
241
  style: const TextStyle(color: AiTubeColors.onSurface),
242
- maxLength: 256,
 
 
 
 
243
  decoration: InputDecoration(
244
- hintText: 'Type a message...',
245
- hintStyle: const TextStyle(color: AiTubeColors.onSurfaceVariant),
246
  border: OutlineInputBorder(
247
- borderRadius: BorderRadius.circular(24),
 
 
 
 
248
  ),
249
  contentPadding: const EdgeInsets.symmetric(
250
- horizontal: 16,
251
- vertical: 8,
252
  ),
253
  counterText: '',
 
 
254
  ),
255
  ),
256
  ),
@@ -262,7 +302,7 @@ class _ChatWidgetState extends State<ChatWidget> {
262
  height: 20,
263
  child: CircularProgressIndicator(strokeWidth: 2),
264
  )
265
- : const Icon(Icons.send),
266
  color: AiTubeColors.primary,
267
  onPressed: _isSending ? null : _sendMessage,
268
  ),
@@ -377,7 +417,7 @@ class _ChatWidgetState extends State<ChatWidget> {
377
  Icon(Icons.chat, color: AiTubeColors.onBackground),
378
  SizedBox(width: 8),
379
  Text(
380
- 'Live Chat',
381
  style: TextStyle(
382
  color: AiTubeColors.onBackground,
383
  fontSize: 16,
@@ -402,11 +442,10 @@ class _ChatWidgetState extends State<ChatWidget> {
402
  _messageController.dispose();
403
  _scrollController.dispose();
404
 
405
- // Ensure chat room is left before disposal
406
- _chatService.leaveRoom(widget.videoId).then((_) {
407
- _chatService.dispose();
408
- }).catchError((error) {
409
- debugPrint('ChatWidget: Error during disposal: $error');
410
  });
411
 
412
  super.dispose();
 
25
  final _messageController = TextEditingController();
26
  final _scrollController = ScrollController();
27
  final _messages = <ChatMessage>[];
28
+ // Track message IDs to prevent duplicates
29
+ final _messageIds = <String>{};
30
  bool _isLoading = true;
31
  bool _isSending = false;
32
  String? _error;
 
122
  void _onNewMessage(ChatMessage message) {
123
  if (!mounted) return;
124
 
125
+ // Check if we already have this message (prevent duplicates)
126
+ if (_messageIds.contains(message.id)) {
127
+ debugPrint('ChatWidget: Skipping duplicate message with ID: ${message.id}');
128
+ return;
129
+ }
130
+
131
  setState(() {
132
  _messages.add(message);
133
+ _messageIds.add(message.id);
134
  // Keep only last 100 messages
135
  if (_messages.length > 100) {
136
+ final removedMessage = _messages.removeAt(0);
137
+ _messageIds.remove(removedMessage.id);
138
  }
139
  });
140
 
 
189
  child: Text(
190
  message.username.substring(0, 1).toUpperCase(),
191
  style: const TextStyle(
192
+ color: Colors.black,
193
  fontWeight: FontWeight.bold,
194
  ),
195
  ),
 
235
  return Container(
236
  padding: const EdgeInsets.all(8),
237
  decoration: const BoxDecoration(
238
+ color: AiTubeColors.transparent,
239
  border: Border(
240
  top: BorderSide(
241
  color: AiTubeColors.surfaceVariant,
 
249
  child: TextField(
250
  controller: _messageController,
251
  style: const TextStyle(color: AiTubeColors.onSurface),
252
+ maxLength: 255,
253
+ maxLines: 1,
254
+ onChanged: (value) {
255
+ // Enforce the character limit by trimming excess characters
256
+ if (value.length > 255) {
257
+ _messageController.text = value.substring(0, 255);
258
+ _messageController.selection = TextSelection.fromPosition(
259
+ const TextPosition(offset: 255),
260
+ );
261
+ }
262
+ },
263
  decoration: InputDecoration(
264
+ hintText: 'Chat with this aituber...',
265
+ hintStyle: const TextStyle(color: AiTubeColors.onSurfaceVariant, fontSize: 16),
266
  border: OutlineInputBorder(
267
+ borderRadius: BorderRadius.circular(12),
268
+ borderSide: const BorderSide(
269
+ color: Color(0x20FFFFFF),
270
+ width: 1,
271
+ ),
272
+ ),
273
+ enabledBorder: OutlineInputBorder(
274
+ borderRadius: BorderRadius.circular(12),
275
+ borderSide: const BorderSide(
276
+ color: Color(0x20FFFFFF),
277
+ width: 1,
278
+ ),
279
+ ),
280
+ focusedBorder: OutlineInputBorder(
281
+ borderRadius: BorderRadius.circular(12),
282
+ borderSide: const BorderSide(
283
+ color: AiTubeColors.primary,
284
+ width: 1,
285
+ ),
286
  ),
287
  contentPadding: const EdgeInsets.symmetric(
288
+ horizontal: 8,
289
+ vertical: 4,
290
  ),
291
  counterText: '',
292
+ filled: true,
293
+ fillColor: const Color(0x10000000),
294
  ),
295
  ),
296
  ),
 
302
  height: 20,
303
  child: CircularProgressIndicator(strokeWidth: 2),
304
  )
305
+ : const Icon(Icons.reply),
306
  color: AiTubeColors.primary,
307
  onPressed: _isSending ? null : _sendMessage,
308
  ),
 
417
  Icon(Icons.chat, color: AiTubeColors.onBackground),
418
  SizedBox(width: 8),
419
  Text(
420
+ 'Simulation log',
421
  style: TextStyle(
422
  color: AiTubeColors.onBackground,
423
  fontSize: 16,
 
442
  _messageController.dispose();
443
  _scrollController.dispose();
444
 
445
+ // Just leave the chat room, but don't dispose the ChatService
446
+ // since it's a singleton that may be used by other widgets
447
+ _chatService.leaveRoom(widget.videoId).catchError((error) {
448
+ debugPrint('ChatWidget: Error leaving chat room during disposal: $error');
 
449
  });
450
 
451
  super.dispose();
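The `_messageIds` set exists because a sent message can now reach the widget twice: once when added locally on send, and once if the server echoes it back through the chat stream. The bounded, de-duplicated buffer in isolation, with a stand-in for the app's `ChatMessage` model:

```dart
/// Stand-in for the app's ChatMessage model (only the id matters here).
class ChatMessage {
  ChatMessage(this.id, this.text);
  final String id;
  final String text;
}

class MessageBuffer {
  final _messages = <ChatMessage>[];
  final _messageIds = <String>{};

  void add(ChatMessage message) {
    // Set.add returns false when the id is already present: a duplicate.
    if (!_messageIds.add(message.id)) return;
    _messages.add(message);
    if (_messages.length > 100) {
      // Evict the oldest message and forget its id.
      _messageIds.remove(_messages.removeAt(0).id);
    }
  }
}
```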
lib/widgets/search_box.dart CHANGED
@@ -67,7 +67,7 @@ class _SearchBoxState extends State<SearchBox> {
67
  FocusScope.of(context).unfocus();
68
  },
69
  decoration: InputDecoration(
70
- hintText: 'Describe a video you want to generate...',
71
  hintStyle: const TextStyle(color: AiTubeColors.onSurfaceVariant),
72
  filled: true,
73
  fillColor: AiTubeColors.surface,
 
67
  FocusScope.of(context).unfocus();
68
  },
69
  decoration: InputDecoration(
70
+ hintText: 'Imagine anything, e.g. "F1 race with farming trucks"',
71
  hintStyle: const TextStyle(color: AiTubeColors.onSurfaceVariant),
72
  filled: true,
73
  fillColor: AiTubeColors.surface,
lib/widgets/video_player/video_player_widget.dart CHANGED
@@ -126,14 +126,28 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
126
  void _pauseVideo() {
127
  if (_playbackController.isPlaying) {
128
  _wasPlayingBeforeBackground = true;
129
- _togglePlayback();
 
 
130
  }
131
  }
132
 
133
  void _resumeVideo() {
134
  if (!_playbackController.isPlaying && _wasPlayingBeforeBackground) {
135
  _wasPlayingBeforeBackground = false;
136
- _togglePlayback();
 
 
137
  }
138
  }
139
 
@@ -229,6 +243,9 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
229
  }
230
 
231
  if (!_isDisposed && mounted) {
 
 
 
232
  setState(() {
233
  _playbackController.isLoading = false;
234
  _playbackController.isInitialLoad = false;
@@ -238,6 +255,10 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
238
 
239
  void _togglePlayback() {
240
  _playbackController.togglePlayback();
 
 
 
 
241
  if (!_isDisposed && mounted) {
242
  setState(() {});
243
  }
@@ -391,6 +412,11 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
391
  void dispose() {
392
  _isDisposed = true;
393
 
 
 
 
 
 
394
  // Unregister the observer
395
  WidgetsBinding.instance.removeObserver(this);
396
 
 
126
  void _pauseVideo() {
127
  if (_playbackController.isPlaying) {
128
  _wasPlayingBeforeBackground = true;
129
+
130
+ // Manually pause playback and simulation together
131
+ _playbackController.togglePlayback();
132
+ _bufferManager.queueManager.setSimulationPaused(true);
133
+
134
+ if (!_isDisposed && mounted) {
135
+ setState(() {});
136
+ }
137
  }
138
  }
139
 
140
  void _resumeVideo() {
141
  if (!_playbackController.isPlaying && _wasPlayingBeforeBackground) {
142
  _wasPlayingBeforeBackground = false;
143
+
144
+ // Manually resume playback and simulation together
145
+ _playbackController.togglePlayback();
146
+ _bufferManager.queueManager.setSimulationPaused(false);
147
+
148
+ if (!_isDisposed && mounted) {
149
+ setState(() {});
150
+ }
151
  }
152
  }
153
 
 
243
  }
244
 
245
  if (!_isDisposed && mounted) {
246
+ // Initialize simulation pause state based on initial autoPlay setting
247
+ _bufferManager.queueManager.setSimulationPaused(!widget.autoPlay);
248
+
249
  setState(() {
250
  _playbackController.isLoading = false;
251
  _playbackController.isInitialLoad = false;
 
255
 
256
  void _togglePlayback() {
257
  _playbackController.togglePlayback();
258
+
259
+ // Control the simulation based on playback state
260
+ _bufferManager.queueManager.setSimulationPaused(!_playbackController.isPlaying);
261
+
262
  if (!_isDisposed && mounted) {
263
  setState(() {});
264
  }
 
412
  void dispose() {
413
  _isDisposed = true;
414
 
415
+ // Ensure simulation is paused when widget is disposed
416
+ _bufferManager.queueManager.setSimulationPaused(true);
419
+
420
  // Unregister the observer
421
  WidgetsBinding.instance.removeObserver(this);
422
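Net effect in the player: the simulation is slaved to playback, with every transition (manual toggle, app backgrounding, the initial autoPlay state, and disposal) funnelled through `setSimulationPaused`. The coupling in isolation, with stand-ins for the real controller and queue manager types:

```dart
/// Stand-in for the real clip queue manager.
abstract class QueueManagerLike {
  void setSimulationPaused(bool isPaused);
}

class PlaybackSimulationSync {
  PlaybackSimulationSync(this.queueManager, {required bool autoPlay})
      : isPlaying = autoPlay {
    // Mirror the initial autoPlay setting, as the init hunk above does.
    queueManager.setSimulationPaused(!isPlaying);
  }

  final QueueManagerLike queueManager;
  bool isPlaying;

  void togglePlayback() {
    isPlaying = !isPlaying;
    queueManager.setSimulationPaused(!isPlaying);
  }

  // Backgrounding pauses both; disposal leaves the simulation paused.
  void onBackground() => queueManager.setSimulationPaused(true);
  void dispose() => queueManager.setSimulationPaused(true);
}
```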