jbilcke-hf committed
Commit e15d9f5 (1 parent: 2e813e6)

small fixes

.claude/settings.local.json ADDED
@@ -0,0 +1,8 @@
+{
+  "permissions": {
+    "allow": [
+      "Bash(flutter build:*)"
+    ],
+    "deny": []
+  }
+}
api_config.py CHANGED
@@ -3,10 +3,8 @@ import os
 PRODUCT_NAME = os.environ.get('PRODUCT_NAME', 'AiTube')
 PRODUCT_VERSION = "2.0.0"
 
-TEXT_MODEL = os.environ.get('HF_TEXT_MODEL',
-    #'HuggingFaceH4/zephyr-7b-beta'
-    'HuggingFaceTB/SmolLM2-1.7B-Instruct'
-)
+# you should use Mistral 7b instruct for good performance and accuracy balance
+TEXT_MODEL = os.environ.get('HF_TEXT_MODEL', '')
 
 # Environment variable to control maintenance mode
 MAINTENANCE_MODE = os.environ.get('MAINTENANCE_MODE', 'false').lower() in ('true', 'yes', '1', 't')
@@ -90,8 +88,8 @@ CONFIG_FOR_STANDARD_HF_USERS = {
     "max_rendering_time_per_client_per_video_in_sec": 15 * 60,
 
     "min_num_inference_steps": 2,
-    "default_num_inference_steps": 8,
-    "max_num_inference_steps": 8,
+    "default_num_inference_steps": 4,
+    "max_num_inference_steps": 4,
 
     "min_num_frames": 9, # 8 + 1
     "default_num_frames": 81, # 8*10 + 1
@@ -110,12 +108,12 @@ CONFIG_FOR_STANDARD_HF_USERS = {
     "max_clip_framerate": 25,
 
     "min_clip_width": 544,
-    "default_clip_width": 928, # 1216, # 768, # 640,
-    "max_clip_width": 928, # 1216, # 768, # 640,
+    "default_clip_width": 1216, # 928, # 1216, # 768, # 640,
+    "max_clip_width": 1216, # 928, # 1216, # 768, # 640,
 
     "min_clip_height": 320,
-    "default_clip_height": 512, # 448, # 416,
-    "max_clip_height": 512, # 448, # 416,
+    "default_clip_height": 672, # 512, # 448, # 416,
+    "max_clip_height": 672, # 512, # 448, # 416,
 }
 
 # Hugging Face users with a Pro may enjoy an improved experience
@@ -123,8 +121,8 @@ CONFIG_FOR_PRO_HF_USERS = {
     "max_rendering_time_per_client_per_video_in_sec": 20 * 60,
 
     "min_num_inference_steps": 2,
-    "default_num_inference_steps": 8,
-    "max_num_inference_steps": 8,
+    "default_num_inference_steps": 4,
+    "max_num_inference_steps": 4,
 
     "min_num_frames": 9, # 8 + 1
     "default_num_frames": 81, # 8*10 + 1
@@ -155,8 +153,8 @@ CONFIG_FOR_ADMIN_HF_USERS = {
     "max_rendering_time_per_client_per_video_in_sec": 60 * 60,
 
     "min_num_inference_steps": 2,
-    "default_num_inference_steps": 6,
-    "max_num_inference_steps": 8,
+    "default_num_inference_steps": 4,
+    "max_num_inference_steps": 4,
 
     "min_num_frames": 9, # 8 + 1
     "default_num_frames": 81, # (8 * 10) + 1
@@ -180,7 +178,7 @@ CONFIG_FOR_ADMIN_HF_USERS = {
 
     "min_clip_height": 320,
     "default_clip_height": 480,
-    "max_clip_height": 704,
+    "max_clip_height": 672,
 }
 
 CONFIG_FOR_ANONYMOUS_USERS = CONFIG_FOR_STANDARD_HF_USERS
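
Note: these per-tier dictionaries act as server-side caps on what a client may request. A minimal sketch of how requested values could be clamped against the active tier's config; the `clamp_request` helper and the role keys are illustrative assumptions, not code from this repo:

```python
# Hypothetical sketch: clamp client-requested settings against the per-tier
# caps defined in api_config.py. Helper name and role keys are assumptions.
from api_config import (
    CONFIG_FOR_STANDARD_HF_USERS,
    CONFIG_FOR_PRO_HF_USERS,
    CONFIG_FOR_ADMIN_HF_USERS,
    CONFIG_FOR_ANONYMOUS_USERS,
)

ROLE_CONFIGS = {
    "anon": CONFIG_FOR_ANONYMOUS_USERS,
    "normal": CONFIG_FOR_STANDARD_HF_USERS,
    "pro": CONFIG_FOR_PRO_HF_USERS,
    "admin": CONFIG_FOR_ADMIN_HF_USERS,
}

def clamp_request(role: str, requested: dict) -> dict:
    """Clamp requested values to the [min, max] range of the role's config."""
    cfg = ROLE_CONFIGS.get(role, CONFIG_FOR_ANONYMOUS_USERS)
    out = dict(requested)
    for field in ("num_inference_steps", "clip_width", "clip_height"):
        lo = cfg[f"min_{field}"]
        hi = cfg[f"max_{field}"]
        default = cfg[f"default_{field}"]
        out[field] = max(lo, min(hi, requested.get(field, default)))
    return out

# Example: a standard user asking for 8 steps at 1216x704 gets 4 steps at 1216x672.
print(clamp_request("normal", {"num_inference_steps": 8, "clip_width": 1216, "clip_height": 704}))
```

With the standard tier above, a request for 8 inference steps at 1216x704 would come back as 4 steps at 1216x672, matching the Flutter config change further down in this commit.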
api_core.py CHANGED
@@ -294,23 +294,31 @@ class VideoGenerationAPI:
                 raise Exception(f"Failed to download video: HTTP {response.status}")
             return await response.read()
 
-    async def search_video(self, query: str, search_count: int = 0, attempt_count: int = 0) -> Optional[dict]:
+    async def search_video(self, query: str, attempt_count: int = 0) -> Optional[dict]:
         """Generate a single search result using HF text generation"""
         # Maximum number of attempts to generate a description without placeholder tags
         max_attempts = 2
         current_attempt = attempt_count
-        temperature = 0.7 # Initial temperature
+        # Use a random temperature between 0.68 and 0.72 to generate more diverse results
+        # and prevent duplicate results from successive calls with the same prompt
+        temperature = random.uniform(0.68, 0.72)
 
         while current_attempt <= max_attempts:
             prompt = f"""# Instruction
 Your response MUST be a YAML object containing a title and description, consistent with what we can find on a video sharing platform.
 Format your YAML response with only those fields: "title" (a short string) and "description" (string caption of the scene). Do not add any other field.
-In the description field, describe in a very synthetic way the visuals of the first shot (first scene), eg "<STYLE>, medium close-up shot, high angle view of a <AGE>yo <GENDER> <CHARACTERS> <ACTIONS>, <LOCATION> <LIGHTING> <WEATHER>". This is just an example! you MUST replace the <TAGS>!!. Don't forget to replace <STYLE> etc, by the actual fields!! Keep it minimalist but still descriptive, don't use bullets points, use simple words, go to the essential to describe style (cinematic, documentary footage, 3D rendering..), camera modes and angles, characters, age, gender, action, location, lighting, country, costume, time, weather, textures, color palette.. etc.
+In the description field, describe in a very synthetic way the visuals of the first shot (first scene), eg "<STYLE>, medium close-up shot, high angle view of a <AGE>yo <GENDER> <CHARACTERS> <ACTIONS>, <LOCATION> <LIGHTING> <WEATHER>". This is just an example! you MUST replace the <TAGS>!!.
+Don't forget to replace <STYLE> etc, by the actual fields!!
+For the style, be creative, for instance you can use anything like a "documentary footage", "japanese animation", "movie scene", "tv series", "tv show", "security footage" etc.
+If the user ask for something specific eg an anime use "japanese animation, ", for a documentary about animals in nature use "documentary footage, " as a style etc.
+Keep it minimalist but still descriptive, don't use bullets points, use simple words, go to the essential to describe style (cinematic, documentary footage, 3D rendering..), camera modes and angles, characters, age, gender, action, location, lighting, country, costume, time, weather, textures, color palette.. etc).
 The most import part is to describe the actions and movements in the scene, so don't forget that!
+Don't describe sound, so ever say things like "atmospheric music playing in the background".
+Instead describe the visual elements we can see in the background, be precise, (if there are anything, cars, objects, people, bricks, birds, clouds, trees, leaves or grass then say it so etc).
 Make the result unique and different from previous search results. ONLY RETURN YAML AND WITH ENGLISH CONTENT, NOT CHINESE - DO NOT ADD ANY OTHER COMMENT!
 
 # Context
-This is attempt {current_attempt} at generating search result number {search_count}.
+This is attempt {current_attempt}.
 
 # Input
 Describe the first scene/shot for: "{query}".
@@ -321,13 +329,12 @@ Describe the first scene/shot for: "{query}".
 title: \""""
 
             try:
-                #print(f"search_video(): calling self.inference_client.text_generation({prompt}, model={TEXT_MODEL}, max_new_tokens=150, temperature={temperature})")
                 response = await asyncio.get_event_loop().run_in_executor(
                     None,
                     lambda: self.inference_client.text_generation(
                         prompt,
                         model=TEXT_MODEL,
-                        max_new_tokens=150,
+                        max_new_tokens=200,
                         temperature=temperature
                     )
                 )
@@ -344,7 +351,7 @@ title: \""""
                 if not result or not isinstance(result, dict):
                     logger.error(f"Invalid result format: {result}")
                     current_attempt += 1
-                    temperature = 0.65 # Try with different temperature on next attempt
+                    temperature = random.uniform(0.68, 0.72) # Try with different random temperature on next attempt
                     continue
 
                 # Extract fields with defaults
@@ -355,32 +362,20 @@ title: \""""
                 if re.search(r'<[A-Z_]+>', description):
                     #logger.warning(f"Description still contains placeholder tags: {description}")
                     if current_attempt < max_attempts:
-                        # Try again with a higher temperature
+                        # Try again with a different random temperature
                         current_attempt += 1
-                        temperature = 0.6
+                        temperature = random.uniform(0.68, 0.72)
                         continue
                     else:
                         # If we've reached max attempts, use the title as description
                         description = title
-
-                # legacy system of tags -- I've decided to to generate them anymore to save some speed
-                tags = result.get('tags', [])
-
-                # Ensure tags is a list of strings
-                if not isinstance(tags, list):
-                    tags = []
-                tags = [str(t).strip() for t in tags if t and isinstance(t, (str, int, float))]
-
-                # Don't generate thumbnails upfront - let the frontend generate them on demand
-                # This makes search results load faster
-                thumbnail = ""
 
                 # Return valid result with all required fields
                 return {
                     'id': str(uuid.uuid4()),
                     'title': title,
                     'description': description,
-                    'thumbnailUrl': thumbnail,
+                    'thumbnailUrl': '',
                     'videoUrl': '',
 
                     # not really used yet, maybe one day if we pre-generate or store content
@@ -390,23 +385,23 @@ title: \""""
 
                     'seed': generate_seed(),
                     'views': 0,
-                    'tags': tags
+                    'tags': []
                 }
 
             except Exception as e:
                 logger.error(f"Search video generation failed: {str(e)}")
                 current_attempt += 1
-                temperature = 0.7 # Try with different temperature on next attempt
+                temperature = random.uniform(0.68, 0.72) # Try with different random temperature on next attempt
 
         # If all attempts failed, return a simple result with title only
         return {
            'id': str(uuid.uuid4()),
            'title': f"Video about {query}",
            'description': f"Video about {query}",
-           'thumbnailUrl': "",
+           'thumbnailUrl': '',
            'videoUrl': '',
            'isLatent': True,
-           'useFixedSeed': False,
+           'useFixedSeed': "query" in description.lower(),
            'seed': generate_seed(),
            'views': 0,
            'tags': []
@@ -523,6 +518,7 @@ Please write the caption for a new clip.
 5. Keep visual consistency with previous clips (in most cases you should repeat the same exact description of the location, characters etc but only change a few elements. If this is a webcam scenario, don't touch the camera orientation or focus)
 6. Return ONLY the caption text, no additional formatting or explanation
 7. Write in English, about 200 words.
+8. Keep the visual style consistant, but content as well (repeat the style, character, locations, appearance etc.. across scenes, when it makes sense).
 8. Your caption must describe visual elements of the scene in details, including: camera angle and focus, people's appearance, age, look, costumes, clothes, the location visual characteristics and geometry, lighting, action, objects, weather, textures, lighting.
 
 # Examples
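
Note: the change above replaces fixed retry temperatures (0.7, then 0.65 / 0.6) with a small random jitter re-rolled on every attempt. A standalone sketch of that retry pattern, assuming huggingface_hub's `InferenceClient` and a placeholder model id (the app itself reads the model from `HF_TEXT_MODEL` via api_config.py), not the project's full `search_video` implementation:

```python
# Minimal sketch of the "random temperature per attempt" retry loop used above.
# Assumes huggingface_hub is installed; MODEL is a placeholder, not the app's setting.
import random
import re
from huggingface_hub import InferenceClient

MODEL = "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # placeholder; api_config.py reads HF_TEXT_MODEL
client = InferenceClient()

def generate_without_placeholders(prompt: str, max_attempts: int = 2) -> str:
    attempt = 0
    # jitter the temperature slightly so identical prompts don't return identical text
    temperature = random.uniform(0.68, 0.72)
    while attempt <= max_attempts:
        text = client.text_generation(
            prompt,
            model=MODEL,
            max_new_tokens=200,
            temperature=temperature,
        )
        # retry if the model echoed template tags such as <STYLE> or <LOCATION>
        if not re.search(r"<[A-Z_]+>", text):
            return text
        attempt += 1
        temperature = random.uniform(0.68, 0.72)
    return text  # fall back to the last attempt
```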
api_session.py CHANGED
@@ -196,10 +196,9 @@ class UserSession:
                 data = await self.search_queue.get()
                 request_id = data.get('requestId')
                 query = data.get('query', '').strip()
-                search_count = data.get('searchCount', 0)
                 attempt_count = data.get('attemptCount', 0)
 
-                logger.info(f"Processing search request for user {self.user_id}: query='{query}', search_count={search_count}, attempt={attempt_count}")
+                logger.info(f"Processing search request for user {self.user_id}: query='{query}', attempt={attempt_count}")
 
                 if not query:
                     logger.warning(f"Empty query received in request from user {self.user_id}: {data}")
@@ -213,12 +212,11 @@
                 try:
                     search_result = await self.shared_api.search_video(
                         query,
-                        search_count=search_count,
                         attempt_count=attempt_count
                     )
 
                     if search_result:
-                        logger.info(f"Search successful for user {self.user_id}, query '{query}' (#{search_count})")
+                        logger.info(f"Search successful for user {self.user_id}, query '{query}'")
                         result = {
                             'action': 'search',
                             'requestId': request_id,
@@ -226,7 +224,7 @@
                             'result': search_result
                         }
                     else:
-                        logger.warning(f"No results found for user {self.user_id}, query '{query}' (#{search_count})")
+                        logger.warning(f"No results found for user {self.user_id}, query '{query}'")
                         result = {
                             'action': 'search',
                             'requestId': request_id,
@@ -234,7 +232,7 @@
                             'error': 'No results found'
                         }
                 except Exception as e:
-                    logger.error(f"Search error for user {self.user_id}, query '{query}' (#{search_count}, attempt {attempt_count}): {str(e)}")
+                    logger.error(f"Search error for user {self.user_id}, query '{query}' (attempt {attempt_count}): {str(e)}")
                     result = {
                         'action': 'search',
                         'requestId': request_id,
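
Note: with `searchCount` gone, the search worker only reads `requestId`, `query` and `attemptCount` from the queued message and replies with a single result or an error. A hypothetical sketch of those payload shapes (key names taken from the diffs above; the `success` flag is inferred from the Dart handler and may differ in the real protocol):

```python
# Illustrative message shapes only, not code from the repo.
incoming = {
    "requestId": "req-1234",                 # placeholder id
    "query": "a documentary about owls",
    "attemptCount": 0,                        # 'searchCount' is no longer sent
}

reply_ok = {
    "action": "search",
    "requestId": incoming["requestId"],
    "success": True,                          # inferred from the client's data['success'] check
    "result": {"title": "...", "description": "...", "tags": []},
}

reply_error = {
    "action": "search",
    "requestId": incoming["requestId"],
    "success": False,
    "error": "No results found",
}
```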
assets/config/aitube.yaml CHANGED
@@ -37,7 +37,8 @@ video:
   # number of frames below 257.
 
   # number of inference steps
-  num_inference_steps: 4
+  # (this is capped by the backend API)
+  num_inference_steps: 8
 
   guidance_scale: 1.0
 
@@ -45,8 +46,11 @@ video:
   # in frames per second (so an integer)
   original_clip_frame_rate: 25
 
-  original_clip_width: 1216 # 544
-  original_clip_height: 704 # 320
+  # (this is capped by the backend API)
+  original_clip_width: 1216
+
+  # (this is capped by the backend API)
+  original_clip_height: 672
 
   # to do more with less, we slow down the videos (a 3s video will become a 4s video)
   # but if you are GPU rich feel feel to play them back at 100% of their speed!
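
Note: the "(this is capped by the backend API)" comments refer to the per-tier limits in api_config.py shown earlier. A rough illustration of how these client defaults relate to the standard-tier caps (assumes PyYAML is installed; not code from the repo):

```python
# Illustrative only: compare the client's requested values with the
# standard-tier caps from api_config.py after this commit.
import yaml

with open("assets/config/aitube.yaml") as f:
    video_cfg = yaml.safe_load(f)["video"]

caps = {  # subset of CONFIG_FOR_STANDARD_HF_USERS
    "num_inference_steps": 4,
    "clip_width": 1216,
    "clip_height": 672,
}

# The client may ask for 8 steps at 1216x672, but the backend clamps steps to 4.
effective_steps = min(video_cfg["num_inference_steps"], caps["num_inference_steps"])
effective_width = min(video_cfg["original_clip_width"], caps["clip_width"])
effective_height = min(video_cfg["original_clip_height"], caps["clip_height"])
print(effective_steps, effective_width, effective_height)  # 4 1216 672
```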
assets/logo/aitube.png ADDED

Git LFS Details

  • SHA256: 002d55416ccd330cc1db5b6b42be4aa27c13144859ec495b6430b7a452df779f
  • Pointer size: 131 Bytes
  • Size of remote file: 334 kB
assets/logo/aitube.svg ADDED
build/web/aitube.png ADDED

Git LFS Details

  • SHA256: 002d55416ccd330cc1db5b6b42be4aa27c13144859ec495b6430b7a452df779f
  • Pointer size: 131 Bytes
  • Size of remote file: 334 kB
build/web/aitube.svg ADDED
build/web/assets/assets/config/aitube.yaml CHANGED
@@ -37,7 +37,8 @@ video:
   # number of frames below 257.
 
   # number of inference steps
-  num_inference_steps: 4
+  # (this is capped by the backend API)
+  num_inference_steps: 8
 
   guidance_scale: 1.0
 
@@ -45,8 +46,11 @@ video:
   # in frames per second (so an integer)
   original_clip_frame_rate: 25
 
-  original_clip_width: 1216 # 544
-  original_clip_height: 704 # 320
+  # (this is capped by the backend API)
+  original_clip_width: 1216
+
+  # (this is capped by the backend API)
+  original_clip_height: 672
 
   # to do more with less, we slow down the videos (a 3s video will become a 4s video)
   # but if you are GPU rich feel feel to play them back at 100% of their speed!
build/web/favicon.png CHANGED

Git LFS Details

  • SHA256: 7ab2525f4b86b65d3e4c70358a17e5a1aaf6f437f99cbcc046dad73d59bb9015
  • Pointer size: 128 Bytes
  • Size of remote file: 917 Bytes

Git LFS Details

  • SHA256: 2b073e3ac7cf84804f34bb19ac871566ca4f0ba4ce2f2479a7ea8e4543b38244
  • Pointer size: 131 Bytes
  • Size of remote file: 404 kB
build/web/flutter_bootstrap.js CHANGED
@@ -39,6 +39,6 @@ _flutter.buildConfig = {"engineRevision":"382be0028d370607f76215a9be322e5514b263
 
 _flutter.loader.load({
   serviceWorkerSettings: {
-    serviceWorkerVersion: "243753163"
+    serviceWorkerVersion: "1407286780"
   }
 });
build/web/flutter_service_worker.js CHANGED
@@ -3,17 +3,18 @@ const MANIFEST = 'flutter-app-manifest';
 const TEMP = 'flutter-temp-cache';
 const CACHE_NAME = 'flutter-app-cache';
 
-const RESOURCES = {"flutter_bootstrap.js": "2e5d8c2674e92a260d84eeb3342bc39a",
+const RESOURCES = {"flutter_bootstrap.js": "4ce0774ba25b67f0552e93fa9550b410",
 "version.json": "b5eaae4fc120710a3c35125322173615",
 "index.html": "f34c56fffc6b38f62412a5db2315dec8",
 "/": "f34c56fffc6b38f62412a5db2315dec8",
-"main.dart.js": "f47ca1e506a05d335168c4b1777590e3",
+"main.dart.js": "56795fe6258579e1283f14325d0fd5ac",
 "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
-"favicon.png": "5dcef449791fa27946b3d35ad8803796",
-"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
-"icons/Icon-maskable-192.png": "c457ef57daa1d16f64b27b786ec2ea3c",
-"icons/Icon-maskable-512.png": "301a7604d45b3e739efc881eb04896ea",
-"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
+"aitube.svg": "26140ba0d153b213b122bc6ebcc17f6c",
+"favicon.png": "c8a183c516004e648a7bac7497c89b97",
+"icons/Icon-192.png": "9d17785814071b986002307441ec7a8f",
+"icons/Icon-maskable-192.png": "9d17785814071b986002307441ec7a8f",
+"icons/Icon-maskable-512.png": "8682b581a7dab984ef4f9b7f21976a64",
+"icons/Icon-512.png": "8682b581a7dab984ef4f9b7f21976a64",
 "manifest.json": "7dc942a630334c1017089988a6ca07d4",
 "assets/AssetManifest.json": "51a53d0237971d07d6d88304c41bf6fb",
 "assets/NOTICES": "f0cfae681e209e19b2b144a9f062a96f",
@@ -25,7 +26,7 @@ const RESOURCES = {"flutter_bootstrap.js": "2e5d8c2674e92a260d84eeb3342bc39a",
 "assets/fonts/MaterialIcons-Regular.otf": "a9126745a3792756bbb88c84ed40e354",
 "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
 "assets/assets/config/custom.yaml": "e5c0b238b6f217f1215fbc813f093656",
-"assets/assets/config/aitube.yaml": "29ed15827ee8364e390a3b446535067a",
+"assets/assets/config/aitube.yaml": "8979c75b20586a238936887a2f24b890",
 "assets/assets/config/default.yaml": "ba11c9ae686f1317a29bce114f0f9fc9",
 "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
 "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
@@ -35,7 +36,8 @@ const RESOURCES = {"flutter_bootstrap.js": "2e5d8c2674e92a260d84eeb3342bc39a",
 "canvaskit/chromium/canvaskit.js": "8191e843020c832c9cf8852a4b909d4c",
 "canvaskit/chromium/canvaskit.wasm": "c054c2c892172308ca5a0bd1d7a7754b",
 "canvaskit/canvaskit.js": "728b2d477d9b8c14593d4f9b82b484f3",
-"canvaskit/canvaskit.wasm": "a37f2b0af4995714de856e21e882325c"};
+"canvaskit/canvaskit.wasm": "a37f2b0af4995714de856e21e882325c",
+"aitube.png": "570e1db759046e2d224fef729983634e"};
 // The application shell files that are downloaded before a service worker can
 // start.
 const CORE = ["main.dart.js",
build/web/icons/Icon-192.png CHANGED

Git LFS Details

  • SHA256: 3dce99077602f70421c1c6b2a240bc9b83d64d86681d45f2154143310c980be3
  • Pointer size: 129 Bytes
  • Size of remote file: 5.29 kB

Git LFS Details

  • SHA256: 47db284aa93b2335f724f16fe607052973840393762926373cac4ba3a54df6d7
  • Pointer size: 130 Bytes
  • Size of remote file: 47.4 kB
build/web/icons/Icon-512.png CHANGED

Git LFS Details

  • SHA256: baccb205ae45f0b421be1657259b4943ac40c95094ab877f3bcbe12cd544dcbe
  • Pointer size: 129 Bytes
  • Size of remote file: 8.25 kB

Git LFS Details

  • SHA256: 1bd347d36930a178cff9b09973a50151eb419df8641f25acb316e3a671a30c66
  • Pointer size: 131 Bytes
  • Size of remote file: 235 kB
build/web/icons/Icon-maskable-192.png CHANGED

Git LFS Details

  • SHA256: d2c842e22a9f4ec9d996b23373a905c88d9a203b220c5c151885ad621f974b5c
  • Pointer size: 129 Bytes
  • Size of remote file: 5.59 kB

Git LFS Details

  • SHA256: 47db284aa93b2335f724f16fe607052973840393762926373cac4ba3a54df6d7
  • Pointer size: 130 Bytes
  • Size of remote file: 47.4 kB
build/web/icons/Icon-maskable-512.png CHANGED

Git LFS Details

  • SHA256: 6aee06cdcab6b2aef74b1734c4778f4421d2da100b0ff9e52b21b55240202929
  • Pointer size: 130 Bytes
  • Size of remote file: 21 kB

Git LFS Details

  • SHA256: 1bd347d36930a178cff9b09973a50151eb419df8641f25acb316e3a671a30c66
  • Pointer size: 131 Bytes
  • Size of remote file: 235 kB
build/web/main.dart.js CHANGED
The diff for this file is too large to render.
lib/services/websocket_api_service.dart CHANGED
@@ -769,7 +769,6 @@ class WebSocketApiService {
         action: 'search',
         params: {
           'query': query,
-          'searchCount': _currentSearchState?.resultCount ?? 0,
           'attemptCount': failedAttempts,
         },
       ),
@@ -923,9 +922,12 @@
       } else if (action == 'search' && data['success'] == true && data['result'] != null) {
         final result = VideoResult.fromJson(data['result'] as Map<String, dynamic>);
 
+        // Complete the pending request but don't add to search results here
+        // The search results will be handled by the startContinuousSearch method
        _pendingRequests[requestId]!.complete(data);
 
-        _searchController.add(result);
+        // Don't add to search controller here to avoid duplicates
+        // _searchController.add(result);
       } else {
         // debugPrint('WebSocketApiService: Processing generic response');
         _pendingRequests[requestId]!.complete(data);
lib/widgets/ai_content_disclaimer.dart CHANGED
@@ -75,10 +75,10 @@ class AiContentDisclaimer extends StatelessWidget {
       child: LayoutBuilder(
         builder: (context, constraints) {
           // Scale text based on container width
-          final baseSize = constraints.maxWidth / 25;
-          final smallTextSize = baseSize * 0.7;
+          final baseSize = constraints.maxWidth / 40;
+          final smallTextSize = baseSize * 0.8;
           final mediumTextSize = baseSize;
-          final largeTextSize = baseSize * 1.3;
+          final largeTextSize = baseSize * 1.1;
 
           return Padding(
             padding: const EdgeInsets.all(16.0),
@@ -187,7 +187,7 @@
               ),
               const SizedBox(height: 20),
               Text(
-                'artificial intelligence',
+                'a generative AI model',
                 style: GoogleFonts.arimo(
                   fontSize: largeTextSize,
                   color: Colors.white,
lib/widgets/video_card.dart CHANGED
@@ -131,6 +131,8 @@ class VideoCard extends StatelessWidget {
             fit: StackFit.expand,
             children: [
               _buildThumbnail(),
+              /*
+              Will be used in the future release
               Positioned(
                 right: 8,
                 top: 8,
@@ -157,6 +159,7 @@
                   ),
                 ),
               ),
+              */
             ],
           ),
         ),
@@ -192,11 +195,16 @@
             overflow: TextOverflow.ellipsis,
           ),
           const SizedBox(height: 4),
-          const Text(
-            'Generated using LTX Video',
-            style: TextStyle(
-              color: AiTubeColors.onSurfaceVariant,
-              fontSize: 12,
+          SizedBox(
+            height: 36, // Approximately height for 3 lines of text with fontSize 12
+            child: Text(
+              video.description,
+              style: const TextStyle(
+                color: AiTubeColors.onSurfaceVariant,
+                fontSize: 12,
+              ),
+              maxLines: 3,
+              overflow: TextOverflow.ellipsis,
            ),
          ),
        ],
web/aitube.png ADDED

Git LFS Details

  • SHA256: 002d55416ccd330cc1db5b6b42be4aa27c13144859ec495b6430b7a452df779f
  • Pointer size: 131 Bytes
  • Size of remote file: 334 kB
web/aitube.svg ADDED
web/favicon.png CHANGED

Git LFS Details

  • SHA256: 7ab2525f4b86b65d3e4c70358a17e5a1aaf6f437f99cbcc046dad73d59bb9015
  • Pointer size: 128 Bytes
  • Size of remote file: 917 Bytes

Git LFS Details

  • SHA256: 2b073e3ac7cf84804f34bb19ac871566ca4f0ba4ce2f2479a7ea8e4543b38244
  • Pointer size: 131 Bytes
  • Size of remote file: 404 kB
web/icons/Icon-192.png CHANGED

Git LFS Details

  • SHA256: 3dce99077602f70421c1c6b2a240bc9b83d64d86681d45f2154143310c980be3
  • Pointer size: 129 Bytes
  • Size of remote file: 5.29 kB

Git LFS Details

  • SHA256: 47db284aa93b2335f724f16fe607052973840393762926373cac4ba3a54df6d7
  • Pointer size: 130 Bytes
  • Size of remote file: 47.4 kB
web/icons/Icon-512.png CHANGED

Git LFS Details

  • SHA256: baccb205ae45f0b421be1657259b4943ac40c95094ab877f3bcbe12cd544dcbe
  • Pointer size: 129 Bytes
  • Size of remote file: 8.25 kB

Git LFS Details

  • SHA256: 1bd347d36930a178cff9b09973a50151eb419df8641f25acb316e3a671a30c66
  • Pointer size: 131 Bytes
  • Size of remote file: 235 kB
web/icons/Icon-maskable-192.png CHANGED

Git LFS Details

  • SHA256: d2c842e22a9f4ec9d996b23373a905c88d9a203b220c5c151885ad621f974b5c
  • Pointer size: 129 Bytes
  • Size of remote file: 5.59 kB

Git LFS Details

  • SHA256: 47db284aa93b2335f724f16fe607052973840393762926373cac4ba3a54df6d7
  • Pointer size: 130 Bytes
  • Size of remote file: 47.4 kB
web/icons/Icon-maskable-512.png CHANGED

Git LFS Details

  • SHA256: 6aee06cdcab6b2aef74b1734c4778f4421d2da100b0ff9e52b21b55240202929
  • Pointer size: 130 Bytes
  • Size of remote file: 21 kB

Git LFS Details

  • SHA256: 1bd347d36930a178cff9b09973a50151eb419df8641f25acb316e3a671a30c66
  • Pointer size: 131 Bytes
  • Size of remote file: 235 kB