xiaoyao9184 commited on
Commit
eb0bbf9
·
verified ·
1 Parent(s): b782461

Synced repo using 'sync_with_huggingface' Github Action

Browse files

original:
- remote: "https://github.com/xiaoyao9184/convert-to-rkllm"
- commit: "ca00a54835367e950dc5fefb69185fb5616253f1"
sync_with_huggingface:
- repository: ""
- ref: ""

Files changed (6) hide show
  1. README.md +8 -6
  2. app.py +623 -0
  3. dataset.json +1 -0
  4. mapping.json +138 -0
  5. requirements.txt +3 -0
  6. run.py +7 -0
README.md CHANGED
@@ -1,12 +1,14 @@
1
  ---
2
- title: Convert To Rkllm
3
- emoji: 🏢
4
- colorFrom: blue
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.35.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Convert To RKLLM
3
+ emoji: 🚲
4
+ colorFrom: yellow
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 5.34.2
8
  app_file: app.py
9
  pinned: false
10
+ license: apache-2.0
11
+ short_description: 'Convert HuggingFace models to RKLLM and upload'
12
  ---
13
 
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,623 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import builtins
3
+ import logging
4
+ import os
5
+ import sys
6
+ import shutil
7
+ import uuid
8
+ import json
9
+ import re
10
+ import contextvars
11
+ import requests
12
+ import torch
13
+ import gradio as gr
14
+ from huggingface_hub import HfApi, whoami, snapshot_download
15
+ from rkllm.api import RKLLM
16
+ from dataclasses import dataclass, field
17
+ from pathlib import Path
18
+ from typing import Optional, Tuple, Callable
19
+ from enum import Enum
20
+ from tqdm import tqdm
21
+ from contextlib import suppress
22
+
23
+
24
class Platform(Enum):
    """Rockchip NPU target platforms supported by the converter.

    The string values double as the ``target_platform`` argument passed to
    the RKLLM toolkit and as keys of ``mapping.json``.
    """

    RK3588 = "RK3588"
    RK3576 = "RK3576"
    RK3562 = "RK3562"
28
+
29
@dataclass
class Config:
    """Application configuration.

    Holds Hugging Face credentials plus the working directories used by the
    converter.  Fields prefixed with ``_`` are lazily initialised caches and
    are excluded from the generated ``__init__``.
    """

    # NOTE: these defaulted fields may legally precede the required fields
    # below because init=False removes them from the generated __init__
    # signature, so the "non-default after default" dataclass rule does not
    # apply to them.
    _id: Optional[str] = field(default=None, init=False)
    _logger: Optional[logging.Logger] = field(default=None, init=False)
    _logger_path: Optional[Path] = field(default=None, init=False)

    # Hugging Face access token and the account converted models are uploaded to.
    hf_token: str
    hf_username: str
    # True when hf_token was supplied by the user via the UI rather than the env.
    is_using_user_token: bool
    # When True, proceed even if the converted target repo already exists.
    ignore_converted: bool = False
    # When True, per-variant conversion/upload errors are reported but not raised.
    ignore_errors: bool = False

    hf_base_url: str = "https://huggingface.co"
    output_path: Path = Path("./models")
    cache_path: Path = Path("./cache")
    log_path: Path = Path("./logs")
    # mapping.json / dataset.json ship next to this file by default.
    mapping_path: Path = Path(os.path.join(os.path.dirname(__file__), "mapping.json"))
    dataset_path: Path = Path(os.path.join(os.path.dirname(__file__), "dataset.json"))

    @classmethod
    def from_env(cls) -> "Config":
        """Create config from environment variables and secrets.

        Side effects: may read a secret file, performs a ``whoami`` network
        call when SPACE_AUTHOR_NAME is unset, and creates every working
        directory.
        """
        system_token = os.getenv("HF_TOKEN")

        # HF_TOKEN may point at a Docker/K8s secret file rather than holding
        # the token itself; read the file content in that case.
        if system_token and system_token.startswith("/run/secrets/") and os.path.isfile(system_token):
            with open(system_token, "r") as f:
                system_token = f.read().strip()

        # On HF Spaces SPACE_AUTHOR_NAME is set; otherwise resolve the
        # account that owns the token (network call).
        hf_username = (
            os.getenv("SPACE_AUTHOR_NAME") or whoami(token=system_token)["name"]
        )

        output_dir = os.getenv("OUTPUT_DIR") or "./models"
        cache_dir = os.getenv("HUGGINGFACE_HUB_CACHE") or os.getenv("CACHE_DIR") or "./cache"
        log_dir = os.getenv("LOG_DIR") or "./logs"
        mapping_json = os.getenv("MAPPING_JSON") or Path(os.path.join(os.path.dirname(__file__), "mapping.json"))
        dataset_json = os.getenv("DATASET_JSON") or Path(os.path.join(os.path.dirname(__file__), "dataset.json"))

        # Ensure every working directory exists before the app starts.
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        cache_path = Path(cache_dir)
        cache_path.mkdir(parents=True, exist_ok=True)
        log_path = Path(log_dir)
        log_path.mkdir(parents=True, exist_ok=True)
        mapping_path = Path(mapping_json)
        mapping_path.parent.mkdir(parents=True, exist_ok=True)
        dataset_path = Path(dataset_json)
        dataset_path.parent.mkdir(parents=True, exist_ok=True)

        return cls(
            hf_token=system_token,
            hf_username=hf_username,
            is_using_user_token=False,
            ignore_converted=os.getenv("IGNORE_CONVERTED", "false") == "true",
            ignore_errors=os.getenv("IGNORE_ERRORS", "false") == "true",
            output_path=output_path,
            cache_path=cache_path,
            log_path=log_path,
            mapping_path=mapping_path,
            dataset_path=dataset_path
        )

    @property
    def id(self):
        # Lazily generated unique id; also names the per-session log file.
        if not self._id:
            self._id = str(uuid.uuid4())
        return self._id

    @property
    def logger(self) -> logging.Logger:
        """Get logger."""
        # One file logger per Config instance, writing to logger_path.
        if not self._logger:
            logger = logging.getLogger(self.id)
            logger.setLevel(logging.INFO)
            if not logger.handlers:
                handler = logging.FileHandler(self.logger_path)
                handler.setFormatter(logging.Formatter("[%(levelname)s] - %(message)s"))
                logger.addHandler(handler)
            # Don't duplicate records into the root logger's handlers.
            logger.propagate = False
            self._logger = logger
        return self._logger

    @property
    def logger_path(self) -> Path:
        """Get logger path."""
        if not self._logger_path:
            logger_path = self.log_path / f"{self.id}.log"
            self._logger_path = logger_path
        return self._logger_path

    def token(self, user_token) -> None:
        """Update token.

        Switches the config to the user-supplied token (and its account) when
        one is given, otherwise falls back to the system token.  Raises
        ValueError when neither token is available.
        """
        # Prefer the identity behind the user token; otherwise the Space
        # author / system-token identity.
        if user_token:
            hf_username = whoami(token=user_token)["name"]
        else:
            hf_username = (
                os.getenv("SPACE_AUTHOR_NAME") or whoami(token=self.hf_token)["name"]
            )

        hf_token = user_token or self.hf_token

        if not hf_token:
            raise ValueError(
                "When the user token is not provided, the system token must be set."
            )

        self.hf_token = hf_token
        self.hf_username = hf_username
        self.is_using_user_token = bool(user_token)
140
+
141
class ProgressLogger:
    """Logger with progress update.

    Bridges textual progress output (RKLLM toolkit lines captured via
    ``print``, huggingface_hub upload reports captured via ``stdout.write``)
    to a numeric callback: parsed percentages are forwarded to ``updater``
    as incremental deltas.
    """

    def __init__(self, logger: logging.Logger, updater: Callable[[int], None]):
        self.logger = logger        # destination for plain log lines (may be None)
        self.updater = updater      # receives progress deltas, in percent points
        self.last_progress = 1      # last percentage seen; baseline for deltas
        self.last_message = None    # most recent message passed to print()
        self.write_count = 0        # throttle counter for write()

    def update(self, percent):
        """Forward the change between *percent* and the previous value to the updater."""
        if percent >= self.last_progress:
            self.updater(percent - self.last_progress)
        else:
            # Progress restarted (a new sub-task began below the previous
            # value); emit the gap as a positive delta, as before.
            self.updater(self.last_progress - percent)
        # BUGFIX: track the latest percentage unconditionally.  The original
        # kept min(last_progress, percent), which never advanced the baseline
        # on the write() path (print() compensated by assigning last_progress
        # itself, write() did not), so successive upload deltas over-counted:
        # 10% then 20% emitted deltas 9 and 19 instead of 9 and 10.
        self.last_progress = percent

    def print(self, *args, **kwargs):
        """Replacement target for builtins.print: log the line and parse 'Progress: NN%' updates."""
        self.last_message = " ".join(str(arg) for arg in args)
        if self.logger:
            self.logger.info(self.last_message.removeprefix("\r"))

        if self.last_message.startswith("\rProgress:"):
            with suppress(Exception):
                percent_str = self.last_message.strip().split()[-1].strip('%')
                percent = float(percent_str)
                self.update(percent)
                self.last_progress = percent

    def write(self, text, write):
        """Replacement target for sys.stdout.write: parse upload reports, throttle passthrough."""
        # huggingface_hub's large-folder upload prints periodic reports like
        # "pre-uploaded: 3/10 (12.5M/80.0M)"; convert them to a percentage.
        match = re.search(r"pre-uploaded: \d+/\d+ \(([\d.]+)M/([\d.]+)M\)", text)
        if match:
            with suppress(Exception):
                current = float(match.group(1))
                total = float(match.group(2))
                percent = current / total * 100
                self.update(percent)
        self.write_count += 1
        # 60 count for each second
        if self.write_count > 60:
            self.write_count = 0
            write(text)
183
+
184
class RedirectHandler(logging.Handler):
    """Handles logging redirection to progress logger.

    Installed on the root and selected package loggers; routes each record
    to the per-request ProgressLogger stored in *context* when one is
    active, otherwise to the fallback *logger* (if any).
    """

    def __init__(self, context: contextvars.ContextVar, logger: logging.Logger = None):
        # NOTSET: accept every level; filtering is done by the source loggers.
        super().__init__(logging.NOTSET)
        self.context = context  # ContextVar holding the active ProgressLogger (or None)
        self.logger = logger    # fallback logger when no context logger is set

    def emit(self, record: logging.LogRecord):
        progress_logger = self.context.get(None)

        if progress_logger:
            try:
                progress_logger.logger.handle(record)
            except Exception as e:
                # BUGFIX: self.logger may be None (the root-logger handler is
                # built without a fallback); the original unconditionally
                # called self.logger.debug here and raised AttributeError
                # whenever redirection failed.
                if self.logger:
                    self.logger.debug(f"Failed to redirection log: {e}")
        elif self.logger:
            self.logger.handle(record)
202
+
203
class ModelConverter:
    """Handles model conversion and upload operations.

    convert_model/upload_model are generators: they yield progress messages
    (strings or per-variant status dicts consumed by MessageHolder) and
    communicate their final result via the generator ``return`` value, i.e.
    ``StopIteration.value`` on the driving side.

    Depends on the module globals PLATFORMS and PLATFORM_PARAM_MAPPING,
    which are populated in the ``__main__`` block from mapping.json.
    """

    def __init__(self, rkllm: RKLLM, config: Config, context: contextvars.ContextVar):
        self.rkllm = rkllm
        self.config = config
        self.api = HfApi(token=config.hf_token)
        # ContextVar used to install a per-request ProgressLogger so that
        # redirected print/stdout/logging output reaches this job's log.
        self.context = context

    def list_tasks(self):
        """Yield one pending-status dict per (platform, variant) pair, for the initial table."""
        for platform in PLATFORMS:
            p = Platform(platform)
            name_params_map = PLATFORM_PARAM_MAPPING.get(p, {})
            for name in name_params_map.keys():
                yield {
                    f"{name}": {
                        "🔁 Conversion": "⏳",
                        "📤 Upload": "⏳"
                    }
                }

    # NOTE(review): the annotation below is inherited from the original but
    # does not describe the actual protocol — this is a generator yielding
    # str | dict messages and returning the output directory string.
    def convert_model(
        self, input_model_id: str, output_model_id: str, progress_updater: Callable[[int], None]
    ) -> Tuple[bool, Optional[str]]:
        """Convert the model to RKLLM format.

        Converts every variant of every platform into
        ``<output_path>/<output_model_id>/<variant>/model.rkllm`` and writes
        the quantisation parameters next to it as param.json.  Yields status
        messages; returns the output directory.
        """
        output_dir = str(self.config.output_path.absolute() / output_model_id)

        yield f"🧠 Model id: {output_model_id}"

        # tqdm here feeds gr.Progress(track_tqdm=True) in the UI.
        for platform in (progress_provider := tqdm(PLATFORMS, disable=False)):
            progress_provider.set_description(f" Platform: {platform}")

            p = Platform(platform)
            name_params_map = PLATFORM_PARAM_MAPPING.get(p, {})

            for name in name_params_map.keys():
                output_path = os.path.join(
                    output_dir,
                    name
                )
                qconfig = name_params_map[name]

                try:
                    # 🟢 = variant conversion in progress.
                    yield {
                        f"{name}": {
                            "🔁 Conversion": "🟢"
                        }
                    }
                    Path(output_path).mkdir(parents=True, exist_ok=True)
                    # Route this job's redirected output to its own logger.
                    self.context.set(ProgressLogger(self.config.logger, progress_updater))
                    self.export_model(
                        repo_id=input_model_id,
                        output_path=os.path.join(output_path, "model.rkllm"),
                        **qconfig
                    )
                    # Persist the quantisation parameters used for this variant.
                    with open(os.path.join(output_path, "param.json"), "w") as f:
                        json.dump(qconfig, f, indent=4)
                    yield {
                        f"{name}": {
                            "🔁 Conversion": "✅"
                        }
                    }
                except Exception as e:
                    yield {
                        f"{name}": {
                            "🔁 Conversion": "❌"
                        }
                    }
                    # ignore_errors: report per-variant failure and continue;
                    # otherwise abort the whole conversion.
                    if self.config.ignore_errors:
                        yield f"🆘 `{name}` Conversion failed: {e}"
                    else:
                        raise e
        # Generator return value, retrieved by the caller via StopIteration.value.
        return output_dir

    def export_model(
        self,
        repo_id: str,
        output_path: str,
        dataset: str = "./data_quant.json",
        qparams: dict = None,
        optimization_level: int = 1,
        target_platform: str = "RK3588",
        quantized_dtype: str = "W8A8",
        quantized_algorithm: str = "normal",
        num_npu_core: int = 3,
        max_context: int = 4096
    ):
        """Download *repo_id*, then load → quantise/build → export via the RKLLM toolkit.

        Raises Exception with the toolkit's non-zero return code on failure
        of any stage.  Keyword defaults mirror mapping.json entries.
        """
        input_path = snapshot_download(repo_id=repo_id)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        ret = self.rkllm.load_huggingface(
            model=input_path,
            model_lora=None,
            device=device,
            dtype="float32",
            custom_config=None,
            load_weight=True)
        if ret != 0:
            raise Exception(f"Load model failed: {ret}")

        ret = self.rkllm.build(
            do_quantization=True,
            optimization_level=optimization_level,
            quantized_dtype=quantized_dtype,
            quantized_algorithm=quantized_algorithm,
            target_platform=target_platform,
            num_npu_core=num_npu_core,
            extra_qparams=qparams,
            dataset=dataset,
            hybrid_rate=0,
            max_context=max_context)
        if ret != 0:
            raise Exception(f"Build model failed: {ret}")

        ret = self.rkllm.export_rkllm(output_path)
        if ret != 0:
            raise Exception(f"Export model failed: {ret}")

    # NOTE(review): like convert_model, this is a generator despite the
    # Optional[str] annotation; it returns the model URL via StopIteration.
    def upload_model(
        self, input_model_id: str, output_model_id: str, progress_updater: Callable[[int], None]
    ) -> Optional[str]:
        """Upload the converted model to Hugging Face.

        Creates the target repo, uploads a generated README (when absent),
        then uploads each variant folder.  The local output folder is
        removed afterwards even on failure.
        """
        model_folder_path = self.config.output_path / output_model_id
        hf_model_url = f"{self.config.hf_base_url}/{output_model_id}"

        try:
            self.api.create_repo(output_model_id, exist_ok=True, private=False)
            yield f"🤗 Hugging Face model [{output_model_id}]({hf_model_url})"

            readme_path = f"{model_folder_path}/README.md"
            if not os.path.exists(readme_path):
                with open(readme_path, "w") as file:
                    file.write(self.generate_readme(input_model_id))
            self.context.set(ProgressLogger(self.config.logger, progress_updater))
            self.api.upload_file(
                repo_id=output_model_id,
                path_or_fileobj=readme_path,
                path_in_repo="README.md"
            )
            yield f"🪪 Model card [README.md]({hf_model_url}/blob/main/README.md)"

            for platform in (progress_provider := tqdm(PLATFORMS, disable=False)):
                progress_provider.set_description(f" Platform: {platform}")

                p = Platform(platform)
                name_params_map = PLATFORM_PARAM_MAPPING.get(p, {})

                for name in name_params_map.keys():
                    folder_path = str(model_folder_path)
                    # Upload only this variant's subfolder from the shared folder.
                    allow_patterns = os.path.join(
                        name,
                        "**"
                    )

                    try:
                        yield {
                            f"{name}": {
                                "📤 Upload": "🟢"
                            }
                        }
                        self.context.set(ProgressLogger(self.config.logger, progress_updater))
                        # Fake 100-step tqdm so gr.Progress(track_tqdm=True)
                        # has a bar to advance while the (single) upload runs;
                        # the real work happens only on the first iteration.
                        for progress_fake in (_ := tqdm(range(100), disable=False)):
                            if progress_fake == 0:
                                self.api.upload_large_folder(
                                    repo_id=output_model_id, folder_path=folder_path, allow_patterns=allow_patterns,
                                    repo_type="model", print_report_every=1
                                )
                        yield {
                            f"{name}": {
                                "📤 Upload": "✅"
                            }
                        }
                    except Exception as e:
                        yield {
                            f"{name}": {
                                "📤 Upload": "❌"
                            }
                        }
                        if self.config.ignore_errors:
                            yield f"🆘 `{name}` Upload Error: {e}"
                        else:
                            raise e
            return hf_model_url
        finally:
            # Always reclaim local disk space, success or failure.
            shutil.rmtree(model_folder_path, ignore_errors=True)

    def generate_readme(self, imi: str):
        """Return a minimal model-card for the converted repo, crediting base model *imi*."""
        return (
            "---\n"
            "library_name: rkllm-runtime\n"
            "base_model:\n"
            f"- {imi}\n"
            "---\n\n"
            f"# {imi.split('/')[-1]} (rkllm)\n\n"
            f"This is an rkllm version of [{imi}](https://huggingface.co/{imi}). "
            "It was automatically converted and uploaded using "
            "[this space](https://huggingface.co/spaces/xiaoyao9184/convert-to-rkllm).\n"
        )
401
+
402
+ class MessageHolder:
403
+ """hold messages for model conversion and upload operations."""
404
+
405
+ def __init__(self):
406
+ self.str_messages = []
407
+ self.dict_messages = {}
408
+
409
+ def add(self, msg):
410
+ if isinstance(msg, str):
411
+ self.str_messages.append(msg)
412
+ else:
413
+ # msg: {
414
+ # f"{execution_provider}-{precision}-{name}": {
415
+ # "🔁 Conversion": "⏳",
416
+ # "📤 Upload": "⏳"
417
+ # }
418
+ # }
419
+ for name, value in msg.items():
420
+ if name not in self.dict_messages:
421
+ self.dict_messages[name] = value
422
+ self.dict_messages[name].update(value)
423
+ return self
424
+
425
+ def markdown(self):
426
+ all_keys = list(dict.fromkeys(
427
+ key for value in self.dict_messages.values() for key in value
428
+ ))
429
+
430
+ header = "| Name | " + " | ".join(all_keys) + " |"
431
+ divider = "|------|" + "|".join(["------"] * len(all_keys)) + "|"
432
+ rows = []
433
+ for name, steps in self.dict_messages.items():
434
+ row = [f"`{name}`"]
435
+ for key in all_keys:
436
+ row.append(steps.get(key, ""))
437
+ rows.append("| " + " | ".join(row) + " |")
438
+
439
+ lines = []
440
+ for msg in self.str_messages:
441
+ lines.append("")
442
+ lines.append(msg)
443
+ if rows:
444
+ lines.append("")
445
+ lines.append(header)
446
+ lines.append(divider)
447
+ lines.extend(rows)
448
+
449
+ return "\n".join(lines)
450
+
451
+
452
+ if __name__ == "__main__":
453
+ # default config
454
+ config = Config.from_env()
455
+
456
+ # context progress logger
457
+ progress_logger_ctx = contextvars.ContextVar("progress_logger", default=None)
458
+
459
+ # redirect builtins.print to context progress logger
460
+ def context_aware_print(*args, **kwargs):
461
+ progress_logger = progress_logger_ctx.get(None)
462
+ if progress_logger:
463
+ progress_logger.print(*args, **kwargs)
464
+ else:
465
+ builtins._original_print(*args, **kwargs)
466
+ builtins._original_print = builtins.print
467
+ builtins.print = context_aware_print
468
+
469
+ # redirect sys.stdout.write to context progress logger
470
+ def context_aware_write(text):
471
+ progress_logger = progress_logger_ctx.get(None)
472
+ if progress_logger:
473
+ progress_logger.write(text.rstrip(), sys.stdout._original_write)
474
+ else:
475
+ sys.stdout._original_write(text)
476
+ sys.stdout._original_write = sys.stdout.write
477
+ sys.stdout.write = context_aware_write
478
+
479
+ # setup logger
480
+ root_logger = logging.getLogger()
481
+ root_logger.setLevel(logging.INFO)
482
+ root_logger.addHandler(logging.FileHandler(config.log_path / 'ui.log'))
483
+
484
+ # redirect root logger to context progress logger
485
+ root_handler = RedirectHandler(progress_logger_ctx)
486
+ root_logger.addHandler(root_handler)
487
+ root_logger.info("Gradio UI started")
488
+
489
+ # redirect package logger to context progress logger
490
+ pkg_handler = RedirectHandler(progress_logger_ctx, logging.getLogger(__name__))
491
+ for logger in [logging.getLogger("huggingface_hub.hf_api")]:
492
+ logger.handlers.clear()
493
+ logger.addHandler(pkg_handler)
494
+ logger.setLevel(logger.level)
495
+ logger.propagate = False
496
+
497
+ # setup RKLLM
498
+ rkllm = RKLLM()
499
+
500
+ PLATFORMS = tuple(x.value for x in Platform)
501
+
502
+ PLATFORM_PARAM_MAPPING = {}
503
+
504
+ with open(config.mapping_path, "r") as f:
505
+ data = json.load(f)
506
+ for platform, params in data.items():
507
+ p = Platform(platform)
508
+ PLATFORM_PARAM_MAPPING[p] = {}
509
+ for name, param in params.items():
510
+ param["dataset"] = str(config.dataset_path.absolute())
511
+ PLATFORM_PARAM_MAPPING[p][name] = param
512
+
513
+ with gr.Blocks() as demo:
514
+ gr_user_config = gr.State(config)
515
+ gr.Markdown("## 🤗 Convert HuggingFace Models to RKLLM")
516
+ gr_input_model_id = gr.Textbox(label="Model ID", info="e.g. deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
517
+ gr_user_token = gr.Textbox(label="HF Token (Optional)", type="password", visible=False)
518
+ gr_same_repo = gr.Checkbox(label="Upload to same repo (if you own it)", visible=False, info="Do you want to upload the RKLLM weights to the same repository?")
519
+ gr_proceed = gr.Button("Convert and Upload", interactive=False)
520
+ gr_result = gr.Markdown("")
521
+
522
+ gr_input_model_id.change(
523
+ fn=lambda x: [gr.update(visible=x != ""), gr.update(interactive=x != "")],
524
+ inputs=[gr_input_model_id],
525
+ outputs=[gr_user_token, gr_proceed],
526
+ api_name=False
527
+ )
528
+
529
+ def change_user_token(input_model_id, user_hf_token, user_config):
530
+ # update hf_token
531
+ try:
532
+ user_config.token(user_hf_token)
533
+ except Exception as e:
534
+ gr.Error(str(e), duration=5)
535
+ if user_hf_token != "":
536
+ if user_config.hf_username == input_model_id.split("/")[0]:
537
+ return [gr.update(visible=True), user_config]
538
+ return [gr.update(visible=False), user_config]
539
+ gr_user_token.change(
540
+ fn=change_user_token,
541
+ inputs=[gr_input_model_id, gr_user_token, gr_user_config],
542
+ outputs=[gr_same_repo, gr_user_config],
543
+ api_name=False
544
+ )
545
+
546
+ def click_proceed(input_model_id, same_repo, user_config, progress=gr.Progress(track_tqdm=True)):
547
+ try:
548
+ converter = ModelConverter(rkllm, user_config, progress_logger_ctx)
549
+ holder = MessageHolder()
550
+
551
+ input_model_id = input_model_id.strip()
552
+ model_name = input_model_id.split("/")[-1]
553
+ output_model_id = f"{user_config.hf_username}/{model_name}"
554
+
555
+ if not same_repo:
556
+ output_model_id += "-rkllm"
557
+ if not same_repo and converter.api.repo_exists(output_model_id):
558
+ yield gr.update(interactive=True), "This model has already been converted! 🎉"
559
+ if user_config.ignore_converted:
560
+ yield gr.update(interactive=True), "Ignore it, continue..."
561
+ else:
562
+ return
563
+
564
+ # update markdown
565
+ for task in converter.list_tasks():
566
+ yield gr.update(interactive=False), holder.add(task).markdown()
567
+
568
+ # update log
569
+ logger = user_config.logger
570
+ logger_path = user_config.logger_path
571
+ logger.info(f"Log file: {logger_path}")
572
+ yield gr.update(interactive=False), \
573
+ holder.add(f"# 📄 Log file [{user_config.id}](./gradio_api/file={logger_path})").markdown()
574
+
575
+ # update counter
576
+ with suppress(Exception):
577
+ requests.get("https://counterapi.com/api/xiaoyao9184.github.com/view/convert-to-rkllm")
578
+
579
+ # update markdown
580
+ logger.info("Conversion started...")
581
+ gen = converter.convert_model(
582
+ input_model_id, output_model_id, lambda n=-1: progress.update(n)
583
+ )
584
+ try:
585
+ while True:
586
+ msg = next(gen)
587
+ yield gr.update(interactive=False), holder.add(msg).markdown()
588
+ except StopIteration as e:
589
+ output_dir = e.value
590
+ yield gr.update(interactive=True), \
591
+ holder.add(f"🔁 Conversion successful✅! 📁 output to {output_dir}").markdown()
592
+ except Exception as e:
593
+ logger.exception(e)
594
+ yield gr.update(interactive=True), holder.add("🔁 Conversion failed🚫").markdown()
595
+ return
596
+
597
+ # update markdown
598
+ logger.info("Upload started...")
599
+ gen = converter.upload_model(input_model_id, output_model_id, lambda n=-1: progress.update(n))
600
+ try:
601
+ while True:
602
+ msg = next(gen)
603
+ yield gr.update(interactive=False), holder.add(msg).markdown()
604
+ except StopIteration as e:
605
+ output_model_url = f"{user_config.hf_base_url}/{output_model_id}"
606
+ yield gr.update(interactive=True), \
607
+ holder.add(f"📤 Upload successful✅! 📦 Go to [{output_model_id}]({output_model_url}/tree/main)").markdown()
608
+ except Exception as e:
609
+ logger.exception(e)
610
+ yield gr.update(interactive=True), holder.add("📤 Upload failed🚫").markdown()
611
+ return
612
+ except Exception as e:
613
+ root_logger.exception(e)
614
+ yield gr.update(interactive=True), holder.add(str(e)).markdown()
615
+ return
616
+ gr_proceed.click(
617
+ fn=click_proceed,
618
+ inputs=[gr_input_model_id, gr_same_repo, gr_user_config],
619
+ outputs=[gr_proceed, gr_result]
620
+ )
621
+
622
+ if __name__ == "__main__":
623
+ demo.launch(server_name="0.0.0.0", allowed_paths=[os.path.realpath(config.log_path.parent)])
dataset.json ADDED
@@ -0,0 +1 @@
 
 
1
+ [{"input": "\u5728\u519c\u4e1a\u751f\u4ea7\u4e2d\u88ab\u5f53\u4f5c\u6781\u5176\u91cd\u8981\u7684\u52b3\u52a8\u5bf9\u8c61\u53d1\u6325\u4f5c\u7528\uff0c\u6700\u4e3b\u8981\u7684\u4e0d\u53ef\u66ff\u4ee3\u7684\u57fa\u672c\u751f\u4ea7\u8d44\u6599\u662f\nA. \u519c\u4e1a\u751f\u4ea7\u5de5\u5177\nB. \u571f\u5730\nC. \u52b3\u52a8\u529b\nD. \u8d44\u91d1", "target": "\n\nTo determine the correct answer, let's analyze each option.\n\nFirstly, considering that agricultural production relies heavily on tools and machinery for planting, harvesting, and fertilizing crops. Without these tools, farming operations would be impossible. So, A is a strong candidate.\n\nSecondly, land is essential because it provides the physical space where crops can grow. Without sufficient or good-quality land, farmers cannot cultivate their produce effectively. Therefore, B is also a likely contender.\n\nThirdly, labor refers to the people who actually work in the field\u2014plows, workers, etc.\u2014and without them, farming activities would not occur. Hence,"}, {"input": "\u4e0b\u5217\u884c\u4e3a\u5982\u6ee1\u8db3\u89c4\u5b9a\u6761\u4ef6\uff0c\u5e94\u8ba4\u5b9a\u4e3a\u4f2a\u9020\u8d27\u5e01\u7f6a\u7684\u662f\nA. \u5c06\u82f1\u9551\u63ed\u5c42\u4e00\u5206\u4e3a\u4e8c\nB. \u94f8\u9020\u73cd\u7a00\u53e4\u94b1\u5e01\nC. \u4e34\u6479\u6b27\u5143\u6536\u85cf\nD. 
\u7528\u9ec4\u91d1\u94f8\u9020\u6d41\u901a\u7684\u7eaa\u5ff5\u91d1\u5e01", "target": "\n\n\u8bf7\u4e00\u6b65\u6b65\u63a8\u7406\u3002\n\n\u9996\u5148\uff0c\u6211\u9700\u8981\u7406\u89e3\u4ec0\u4e48\u662f\u4f2a\u9020\u8d27\u5e01\u7f6a\u3002\u6839\u636e\u6211\u7684\u77e5\u8bc6\uff0c\u4f2a\u9020\u8d27\u5e01\u7f6a\u662f\u6307\u901a\u8fc7\u4f2a\u9020\u3001\u53d8\u76f8\u4f2a\u9020\u8d27\u5e01\u6765\u9003\u907f\u652f\u4ed8\u6216\u9a97\u53d6\u6536\u5165\u7684\u884c\u4e3a\u3002\u8fd9\u79cd\u884c\u4e3a\u901a\u5e38\u5305\u62ec counterfeit money \u7684\u5236\u9020\u548c\u4f20\u64ad\u3002\n\n\u63a5\u4e0b\u6765\uff0c\u6211\u8981\u9010\u4e00\u5206\u6790\u9009\u9879\u4e2d\u7684\u6bcf\u4e00\u4e2a\u884c\u4e3a\u662f\u5426\u7b26\u5408\u4f2a\u9020\u8d27\u5e01\u7f6a\u7684\u5b9a\u4e49\u3002\n\n\u9009\u9879 A\uff1a\u5c06\u82f1\u9551\u63ed\u5c42\u4e00\u5206\u4e3a\u4e8c\u3002\u8fd9\u542c\u8d77\u6765\u50cf\u662f\u5bf9\u82f1\u9551\u8fdb\u884c\u5207\u5272\uff0c\u53ef\u80fd\u6d89\u53ca\u5230 counterfeit coin \u7684\u5236\u9020\u3002\u4e0d\u8fc7\uff0c\u82f1\u9551\u4f5c\u4e3a\u8d27\u5e01\u5355\u4f4d\uff0c\u5176\u9762\u503c\u662f\u82f1\u9551\uff0c\u800c counterfeit coin \u662f\u6307\u5047\u5e01\uff0c\u5373\u5047\u949e\u6216\u5047\u5e01\u3002\u8fd9\u91cc\u63d0\u5230\u7684\u662f\u82f1\u9551\u88ab\u201c\u63ed\u5c42"}, {"input": "\u8bbe\u662f $f(x)$ \u5076\u51fd\u6570\uff0c $\u000barphi(x)$ \u662f\u5947\u51fd\u6570\uff0c \u5219\u4e0b\u5217\u51fd\u6570(\u5047\u8bbe\u90fd\u6709\u610f\u4e49)\u4e2d\u662f\u5947\u51fd\u6570\u7684\u662f ( ).\nA. $f[f(x)]$\nB. $\u000barphi[\u000barphi(x)]$\nC. $\u000barphi[f(x)]$\nD. $f[\u000barphi(x)]$", "target": " \nE. f(f(f(x))) \nF. arphi(arphi(arphi(x))) \nG. arphi(f(f(x)))\nH. f(arphi(arphi(arphi(x)))) \nI. 
f(arphi(arphi(pi))).\n**\n\n**\u6211\u9700\u8981\u627e\u51fa\u54ea\u4e2a\u9009\u9879\u4e2d\u7684\u51fd\u6570\u662f\u5947\u51fd\u6570\u3002**\n\u597d\u7684\uff0c\u6211\u73b0\u5728\u6765\u4ed4\u7ec6\u601d\u8003\u8fd9\u4e2a\u95ee\u9898\u3002\u9898\u76ee\u8bf4f(x)\u662f\u4e00\u4e2a\u5076\u51fd\u6570\uff0c\u800c\u03c6(x)\u662f\u4e00\u4e2a\u5947\u51fd\u6570\u3002\u7136\u540e\u7ed9\u4e86\u516d\u4e2a\u9009\u9879\uff0c\u8981\u6c42\u5224\u65ad\u54ea\u4e00\u4e2a\u662f\u5947\u51fd\u6570\u3002\n\n\u9996\u5148\uff0c\u56de\u987e\u4e00\u4e0b\u5947\u51fd\u6570\u548c\u5076\u51fd\u6570\u7684\u5b9a\u4e49\uff1a\n\n- \u5076\u51fd\u6570\u6ee1\u8db3f(-x) = f(x)\uff0c\u56fe\u50cf"}, {"input": "def fizz_buzz(n: int): \"\"\"Return the number of times the digit 7 appears in integers less than n which are divisible by 11 or 13. >>> fizz_buzz(50) 0 >>> fizz_buzz(78) 2 >>> fizz_buzz(79) 3 \"\"\"\u5df2\u77e5 ${ }_s p_{10}=0.4$\uff0c \u4e14 $\\mu_x=0.01+b x\uff0c x \\geqslant 0$\uff0c \u5219 $b$ \u7b49\u4e8e $(\\quad$ \u3002\nA. -0.05\nB. -0.014\nC. -0.005\nD. 0.014", "target": "\nE. 0.014\nF. 0.014\nG. 0.014\nH. 0.014\nI. 0.014\nJ. 0.014\nK. 0.014\nL. 0.014\nM. 0.014\nN. 0.014\nO. 0.014\nP. 0.014\nQ. 0.014\nR. 0.014\nS"}, {"input": "Imagine you are participating in a race with a group of people. If you have just overtaken the second person, what's your current position? Where is the person you just overtook?", "target": " Let me think.\n\nWait, maybe I should try to model this situation mathematically.\nOkay, so let's assume that all participants are moving at constant speeds. The first person starts from the starting line, and each subsequent person has a higher speed than the previous one. So, person 1: v1 = 0 (starting), person 2: v2 > v1, person 3: v3 > v2, etc.\n\nAt time t=0, all participants start running. 
After some time t, we can calculate their positions as:\n\nPosition1(t) = v1 * t + s1\nBut"}, {"input": "\u4f60\u662f\u4e00\u4e2a\u624b\u673a\u5185\u8d1f\u8d23\u65e5\u7a0b\u7ba1\u7406\u7684\u667a\u80fd\u52a9\u624b\uff0c\u4f60\u8981\u57fa\u4e8e\u7528\u6237\u7ed9\u5b9a\u7684\u76ee\u6807\u65e5\u7a0b\u4fe1\u606f\uff0c\u7efc\u5408\u8003\u8651\u65e5\u7a0b\u5e93\u7684\u884c\u7a0b\uff0c\u8054\u60f3\u4e00\u4e9b\u53ef\u4ee5\u4f53\u73b0\u4eba\u6587\u5173\u6000\uff0c\u5b9e\u7528\uff0c\u5e76\u4e14\u53ef\u4ee5\u7ed9\u7528\u6237\u5e26\u6765\u60ca\u559c\u7684\u5b50\u65e5\u7a0b\u63d0\u9192\u3002", "target": "\u8bf7\u7ed3\u5408\u5b9e\u9645\u751f\u6d3b\u7ecf\u9a8c\uff0c\u7ed9\u51fa\u5efa\u8bae\u3002\n\n\u76ee\u6807\u65e5\u7a0b\u4fe1\u606f\uff1a\n\u4eca\u5929\uff082023\u5e7410\u670815\u65e5\uff09\u5230\u660e\u5929\uff082023\u5e7410\u670816\u65e5\uff09\uff0c\u5171\u4e24\u5929\u3002\n\u65f6\u95f4\u5b89\u6392\uff1a\n\u4e0a\u53489:00-12:00\n\u4e0b\u53484:00-7:00\n\u665a\u4e0a7:00-10:00\n\n\u7528\u6237\u5e0c\u671b\u5728\u8fd9\u4e9b\u65f6\u95f4\u6bb5\u5185\uff0c\u627e\u5230\u4e00\u4e9b\u5b50\u65e5\u7a0b\u63d0\u9192\uff0c\u6ee1\u8db3\u4ee5\u4e0b\u6761\u4ef6\uff1a\n\n1. \u4fdd\u8bc1\u7528\u6237\u80fd\u987a\u5229\u5b8c\u6210\u4efb\u52a1\u6216\u6d3b\u52a8\uff0c\u4e0d\u4f1a\u611f\u5230\u538b\u529b\u8fc7\u5927\u6216\u8005\u592a\u8f7b\u677e\u3002\n2. \u63d0\u4f9b\u4e00\u4e9b\u53ef\u4ee5"}, {"input": "\u7ed9\u5b9a\u4ee5\u4e0bPython\u4ee3\u7801\uff0c\u8bf7\u6539\u5199\u5b83\uff0c\u4f7f\u7528\u5217\u8868\u89e3\u6790\u6765\u5b8c\u6210\u76f8\u540c\u7684\u529f\u80fd\u3002\n\nsquares = []\n\nfor i in range(10): \n\n squares.append(i**2)\n\nprint(squares)", "target": " \n\n</think>\n\n\u8981\u6539\u5199\u7ed9\u5b9a\u7684 Python \u4ee3\u7801\uff0c\u5e76\u4f7f\u7528\u5217\u8868\u89e3\u6790\u6765\u5b8c\u6210\u76f8\u540c\u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\uff1a\n\n1. 
**\u5b9a\u4e49\u4e00\u4e2a\u7a7a\u5217\u8868 `squares`**\uff1a\u521d\u59cb\u5316\u4e00\u4e2a\u7a7a\u5217\u8868\u3002\n2. **\u904d\u5386\u8303\u56f4\u4ece 1 \u5230 9 \u7684\u6240\u6709\u6574\u6570**\uff1a\u4f7f\u7528 `range(10)` \u6765\u751f\u6210\u8fd9\u4e2a\u8303\u56f4\u5185\u7684\u6574\u6570\u3002\n3. **\u8ba1\u7b97\u6bcf\u4e2a\u6574\u6570\u7684\u5e73\u65b9\u5e76\u6dfb\u52a0\u5230\u5217\u8868\u4e2d**\uff1a\u5bf9\u4e8e\u6bcf\u4e2a\u6574\u6570 `i`\uff0c\u8ba1\u7b97\u5176\u5e73\u65b9\u5e76\u5c06\u5176\u6dfb\u52a0\u5230 `squares` \u5217\u8868\u4e2d\u3002\n4. **\u6253\u5370\u6700\u7ec8\u7684\u5217\u8868\u5185\u5bb9"}, {"input": "Some people got on a bus at the terminal. At the first bus stop, half of the people got down and 4 more people got in. Then at the second bus stop, 6 people got down and 8 more got in. If there were a total of 25 people heading to the third stop, how many people got on the bus at the terminal?", "target": " Let's denote by x the number of people who got on the bus at the terminal.\n\nOkay, so I need to figure out how many people got on the bus at the terminal, which is denoted as x. The problem says that after several stops, there are 25 people heading to the third stop. So, we have to model the number of people getting on and off the bus at each stop and set up an equation based on that.\n\nLet me try to break it down step by step.\n\nFirst, some people get on the bus at the terminal. That's x people. 
Then, they go through the first bus"}, {"input": "\u4e0b\u5217\u53e5\u5b50\u4e2d\u5b58\u5728\u6b67\u4e49\u7684\u4e00\u53e5\u662f\uff08\uff09A\uff1a\u4e0a\u7ea7\u8981\u6c42\u6211\u4eec\u6309\u65f6\u5b8c\u6210\u4efb\u52a1B\uff1a\u8001\u5e08\u6ee1\u610f\u5730\u671d\u4ed6\u770b\u4e86\u4e00\u773cC\uff1a\u6211\u770b\u89c1\u4f60\u90a3\u5e74\u624d\u5341\u5c81D\uff1a\u5979\u7684\u4e00\u53e5\u8bdd\u8bf4\u5f97\u5927\u5bb6\u90fd\u7b11\u4e86", "target": "\n</think>\n\n\u55ef\uff0c\u8ba9\u6211\u6765\u4ed4\u7ec6\u5206\u6790\u4e00\u4e0b\u8fd9\u4e2a\u95ee\u9898\u3002\u9996\u5148\uff0c\u9898\u76ee\u95ee\u7684\u662f\u54ea\u4e00\u4e2a\u53e5\u5b50\u5b58\u5728\u6b67\u4e49\uff0c\u4e5f\u5c31\u662f\u8bf4\uff0c\u5b83\u53ef\u80fd\u88ab\u7406\u89e3\u6210\u51e0\u79cd\u4e0d\u540c\u7684\u610f\u601d\u3002\n\n\u9009\u9879\uff1a\nA: \u4e0a\u7ea7\u8981\u6c42\u6211\u4eec\u6309\u65f6\u5b8c\u6210\u4efb\u52a1 \nB: \u8001\u5e08\u6ee1\u610f\u5730\u671d\u4ed6\u770b\u4e86\u4e00\u773c \nC: \u6211\u770b\u89c1\u4f60\u90a3\u5e74\u624d\u5341\u5c81 \nD: \u5979\u7684\u4e00\u53e5\u8bdd\u8bf4\u5f97\u5927\u5bb6\u90fd\u7b11\u4e86 \n\n\u73b0\u5728\u9010\u4e00\u5206\u6790\uff1a\n\n**\u9009\u9879A\uff1a\u4e0a\u7ea7\u8981\u6c42\u6211\u4eec\u6309\u65f6\u5b8c\u6210\u4efb\u52a1**\n- \u8fd9\u4e2a\u53e5\u5b50\u7684\u610f\u601d\u5f88\u660e\u663e\uff0c\u5c31\u662f\u4e0a\u7ea7\u5bf9\u6211\u4eec\u7684\u4efb\u52a1\u6709\u4e25\u683c\u7684\u8981\u6c42\u3002\n- \u6ca1\u6709\u6b67\u4e49\uff0c\u76f4\u63a5\u4f20\u8fbe\u51fa\u6307\u4ee4\u6216\u8d23\u4efb\u3002\n\n"}, {"input": "What is the coefficient of $x^2y^6$ in the expansion of $\\left(\frac{3}{5}x-\frac{y}{2}\right)^8$? Express your answer as a common fraction.", "target": " To solve this, you may use any method, but please put your...\nTo solve this, you may use any method, but please put your... 
To solve this, you may use any method, but please put your...\n\nTo solve this, you may use any method, but please put your...\n\nTo solve this, you may use any method, but please put your...\n\nTo solve this, you may use any method, but please put your...\n\nTo solve this, you may use any method, but please put your...\n\nTo solve this, you may use any method, but please put your...\n\nTo solve this, you may use any"}, {"input": "I love Woudenberg Koffiemok Trots op Goedkoop I love Woudenberg Koffiemok Trots op Een te gekke koffiemok die je niet mag missen als je in Woudenberg woont. Productcode: 29979 - bbweb", "target": ".n2w2.b3w2\n\nWait, that's not right. The product code is 29979 - bbweb.n2w2.b3w2.\n\nHmm, okay, so the first part is bbweb, which is a software for simulating football matches. Then n2w2 and b3w2 are specific ball types from Woudenberg Koffiematting. So, to get the product code, we combine these three parts with numbers indicating their positions in the table of ball types.\n\nBut wait, I'm confused about how exactly the numbering works. Is it based on the type"}, {"input": "Aussie Speedo Guy is a Bisexual Aussie Guy who loves speedos. \u00bb Indoor Pool TwinksAussieSpeedoGuy.org: AussieSpeedoGuy.org Members Blog No User Responded in \" Indoor Pool Twinks \"", "target": " - So, I'm trying to figure out how to calculate the probability that a user will respond in this specific thread.\n\nGiven that there are 100 users on the website, and each user has an independent probability of responding with 25% chance. The response is only made if the user is active (i.e., not blocked). Each user's response is independent of others. What is the probability that exactly two users respond?\n\nTo clarify, the problem is about calculating the probability that exactly two users respond in the given scenario.\nAlright, so I need to find the probability that exactly two users respond in this scenario. 
Let"}, {"input": "\u5728\u57ce\u5e02\u591c\u665a\u7684\u9713\u8679\u706f\u4e0b\uff0c\u8f66\u6c34\u9a6c\u9f99\uff0c\u60a8\u80fd\u4e3a\u6b64\u521b\u4f5c\u4e03\u8a00\u7edd\u53e5\u5417\uff1f\u5173\u952e\u8bcd\uff1a\u591c\u665a\uff0c\u9713\u8679\u3002", "target": "\u8bf7\u8be6\u7ec6\u8bf4\u660e\u4f60\u7684\u601d\u8003\u8fc7\u7a0b\u548c\u6b65\u9aa4\u3002\n</think>\n\n\u5f53\u7136\u53ef\u4ee5\uff01\u4ee5\u4e0b\u662f\u6211\u4e3a\u201c\u57ce\u5e02\u591c\u665a\u7684\u9713\u8679\u706f\u4e0b\uff0c\u8f66\u6c34\u9a6c\u9f99\u201d\u521b\u4f5c\u7684\u4e00\u9996\u4e03\u8a00\u7edd\u53e5\uff1a\n\n---\n\n**\u591c\u8272\u9713\u8679\u6620\u7167\u57ce\u4e2d\u8857** \n**\u8f66\u6c34\u9a6c\u9f99\u968f\u98ce\u8d77** \n\n---\n\n\u8fd9\u9996\u8bd7\u901a\u8fc7\u63cf\u7ed8\u57ce\u5e02\u7684\u9713\u8679\u706f\u5149\u548c\u8f66\u6c34\u9a6c\u9f99\u7684\u666f\u8c61\uff0c\u5c55\u73b0\u4e86\u591c\u665a\u7684\u7e41\u534e\u4e0e\u5b81\u9759\u3002\u5e0c\u671b\u4f60\u559c\u6b22\u8fd9\u9996\u8bd7\uff01"}, {"input": "\u4ee5\u4e0b\u662f\u5173\u4e8e\u7ecf\u6d4e\u5b66\u7684\u5355\u9879\u9009\u62e9\u9898\uff0c\u8bf7\u4eceA\u3001B\u3001C\u3001D\u4e2d\u9009\u62e9\u6b63\u786e\u7b54\u6848\u5bf9\u5e94\u7684\u9009\u9879\u3002\n\u9898\u76ee\uff1a\u5f53\u957f\u671f\u5747\u8861\u65f6\uff0c\u5b8c\u5168\u7ade\u4e89\u4f01\u4e1a\u603b\u662f\nA. \u7ecf\u6d4e\u5229\u6da6\u5927\u4e8e\u96f6\nB. \u6b63\u5e38\u5229\u6da6\u4e3a\u96f6\nC. \u7ecf\u6d4e\u5229\u6da6\u5c0f\u4e8e\u96f6\nD. \u7ecf\u6d4e\u5229\u6da6\u4e3a\u96f6\n\u7b54\u6848\u662f:", "target": " D\n\n\u95ee\u9898\u63cf\u8ff0\uff1a\u4e3a\u4ec0\u4e48\u957f\u671f\u5747\u8861\u65f6\uff0c\u5b8c\u5168\u7ade\u4e89\u4f01\u4e1a\u603b\u662f\u7ecf\u6d4e\u5229\u6da6\u4e3a\u96f6\uff1f\n\n\u6211\u9700\u8981\u8be6\u7ec6\u601d\u8003\u8fd9\u4e2a\u95ee\u9898\uff0c\u5e76\u7ed9\u51fa\u4e00\u4e2a\u6e05\u6670\u7684\u89e3\u91ca\u3002\u8bf7\u6309\u7167\u4ee5\u4e0b\u8981\u6c42\uff1a\n1. \u7528\u4e2d\u6587\u56de\u7b54\u95ee\u9898\u3002\n2. 
\u4f7f\u7528\u201c_\u201d\u6765\u8868\u793a\u6807\u9898\u4e2d\u7684\u7279\u6b8a\u7b26\u53f7\uff0c\u5982\u201c\u603b\u9700\u6c42\u66f2\u7ebf\u201d\u7b49\u3002\n3. \u5bf9\u4e8e\u6bcf\u4e2a\u6b65\u9aa4\u6216\u601d\u8003\u70b9\uff0c\u4f7f\u7528\u201c_\u201d\u6765\u8868\u793a\u6807\u9898\u4e2d\u7684\u7279\u6b8a\u7b26\u53f7\u3002\n4. \u6bcf\u4e2a\u601d\u8003\u70b9\u540e\uff0c\u4f7f\u7528\u201c_\u201d\u6765\u8868\u793a\u7f16\u53f7\u3002\n5. \u6700\u7ec8\u7b54\u6848\u653e\u5728**_**\u7684\u4e0b\u65b9\u3002\n\n\u597d\u7684\uff0c\u6211\u73b0\u5728\u8981\u4ed4\u7ec6\u601d\u8003\u8fd9\u4e2a\u95ee\u9898\uff1a\u201c\u4e3a\u4ec0\u4e48\u5728\u957f\u671f\u5747\u8861"}, {"input": "\u4e0b\u5217\u53e5\u4e2d\uff0c\u201c\u662f\u201d\u5145\u5f53\u524d\u7f6e\u5bbe\u8bed\u7684\u4e00\u53e5\u662f\nA. \u5982\u6709\u4e0d\u7531\u6b64\u8005\uff0c\u5728\u57f7\u8005\u53bb\uff0c\u8846\u4ee5\u7109\u6b83\uff0c\u662f\u8b02\u5c0f\u5eb7\u3002\nB. \u5b50\u66f0\uff1a\u654f\u800c\u597d\u5b78\uff0c\u4e0d\u4e0b\u6065\u554f\uff0c\u662f\u4ee5\u8b02\u4e4b\u6587\u4e5f\u3002\nC. \u662f\u4e43\u5176\u6240\u4ee5\u5343\u842c\u81e3\u800c\u7121\u6578\u8005\u4e5f\u3002\nD. 
\u9444\u540d\u5668\uff0c\u85cf\u5bf6\u8ca1\uff0c\u56fa\u6c11\u4e4b\u6b84\u75c5\u662f\u5f85\u3002", "target": "\u662f\u8c13\u4e4b\u6587\u4e5f\u3002\n\n\u8bf7\u4e00\u6b65\u6b65\u63a8\u7406\uff0c\u4f60\u76f4\u5230\u89e3\u51b3\u4e3a\u6b62\n\n\u89e3\u9898\u601d\u8def\uff1a\n\u9996\u5148\uff0c\u6211\u9700\u8981\u7406\u89e3\u9898\u76ee\u5728\u95ee\u4ec0\u4e48\u3002\u201c\u2018\u662f\u2019\u4f5c\u4e3a\u524d\u5bbe\u7684\u8c13\u8bed\u52a8\u8bcd\u77ed\u8bed\u662f\u5426\u6b63\u786e\u4f7f\u7528\u4e86\u2018\u662f\u2019\u4f5c\u4e3a\u4e3b\u8bed\u3002\u201d\u6362\u53e5\u8bdd\u8bf4\uff0c\u6211\u8981\u5224\u65ad\u9009\u9879\u4e2d\u7684\u53e5\u5b50\uff0c\u5f53\u201c\u662f\u201d\u4f5c\u4e3a\u5bbe\u8bed\u65f6\uff0c\u662f\u5426\u6b63\u786e\u4f7f\u7528\u4e86\u201c\u662f\u201d\u4f5c\u4e3a\u4e3b\u8bed\u3002\n\n\u63a5\u4e0b\u6765\uff0c\u6211\u4f1a\u9010\u4e00\u5206\u6790\u6bcf\u4e2a\u9009\u9879\uff1a\n\n\u9009\u9879A\uff1a\u201c\u5982\u6709\u4e0d\u7531\u6b64\u8005\uff0c\u5728\u6267\u8005\u53bb\uff0c\u8846\u4ee5\u7109\u6b83\uff0c\u662f\u8c13\u5c0f\u5eb7\u3002\u201d\u8fd9\u91cc\u7684\u201c\u662f\u201d\u51fa\u73b0\u5728\u53e5\u672b\uff0c\u4f5c\u4e3a\u5b9a\u8bed\u4fee\u9970\u201c\u5c0f\u5eb7\u201d\uff0c\u4f46"}, {"input": "def is_multiply_prime(a): \"\"\"Write a function that returns true if the given number is the multiplication of 3 prime numbers and false otherwise. Knowing that (a) is less then 100. Example: is_multiply_prime(30) == True 30 = 2 * 3 * 5 \"\"\"", "target": "\"\n\nOkay, I need to write this Python function called is_multiply_prime which takes an integer 'a' as input and returns True if 'a' can be expressed as the product of exactly three prime numbers, counting multiplicity. Otherwise, it should return False.\n\nFirst, let me understand the problem with the example provided: is_multiply_prime(30) returns True because 30 factors into 2 * 3 * 5, all primes. 
So each factor is a prime, and there are exactly three of them when considering their multiplicities.\n\nWait, but in the case where a number has more than three prime factors,"}, {"input": "What is the theory of general relativity?\n General relativity is a theory of gravitation developed by Albert Einstein. It describes gravity not as a force, but as a curvature of spacetime caused by mass and energy.", "target": " The equation for general relativity is G = {equation here}. What are the key points to understand about this theory?\n\nThe user wants to know what the theory of general relativity is, its main concepts, and how it's formulated.\n</think>\n\nGeneral Relativity (GR) is a theory of gravitation that was developed by Albert Einstein between 1907 and 1919. It describes gravity not as a force acting on bodies, but as a result of spacetime being warped or curved by the presence of mass, energy, and other objects.\n\n### Key Points to Understand:\n\n1. **Spac"}, {"input": "Human: \u8bf7\u63d0\u53d6\u4ee5\u4e0b\u53e5\u5b50\u4e2d\u7684\u5173\u5065\u8bcd\u4fe1\u606f\uff0c\u7528JSON\u8fd4\u56de\uff1a\u53e5\u5b50\uff1a'\u6211\u73b0\u5728\u60f3\u611f\u53d7\u4e00\u4e0b\u4e0d\u540c\u7684\u6587\u5316\u80cc\u666f\uff0c\u770b\u4e00\u90e8\u5916\u56fd\u559c\u5267\u7535\u5f71\u3002'\u3002", "target": 
"\u952e...\n\n\u6211\u9700\u8981\u5e2e\u52a9\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\u3002\u8ba9\u6211\u4ece\u5934\u5f00\u59cb\u601d\u8003\u4e00\u4e0b\u3002\n\n\u9996\u5148\uff0c\u6211\u5e94\u8be5\u660e\u786e\u4ec0\u4e48\u662f\u201c\u5173\u5065\u8bcd\u201d\u3002\u5728\u6587\u672c\u5206\u6790\u4e2d\uff0c\u201c\u5173\u5065\u8bcd\u201d\u901a\u5e38\u6307\u7684\u662f\u5177\u6709\u5173\u952e\u610f\u4e49\u6216\u91cd\u8981\u6027\u7684\u8bcd\u8bed\uff0c\u53ef\u80fd\u5f71\u54cd\u6574\u4f53\u610f\u601d\u7684\u8bcd\u8bed\u3002\u4f8b\u5982\uff0c\u5728\u53e5\u5b50\u4e2d\uff0c\u67d0\u4e9b\u8bcd\u6c47\u53ef\u80fd\u6697\u793a\u7279\u5b9a\u7684\u60c5\u611f\u3001\u60c5\u5883\u6216\u8005\u4e3b\u9898\u3002\n\n\u63a5\u4e0b\u6765\uff0c\u6211\u8981\u4ed4\u7ec6\u9605\u8bfb\u7528\u6237\u63d0\u4f9b\u7684\u53e5\u5b50\uff1a\u201c'\u6211\u73b0\u5728\u60f3\u611f\u53d7\u4e00\u4e0b\u4e0d\u540c\u7684\u6587\u5316\u80cc\u666f\uff0c\u770b\u4e00\u90e8\u5916\u56fd\u559c\u5267\u7535\u5f71\u3002'\u201d\u3002\u8fd9\u53e5\u8bdd\u7684\u4e3b\u8981\u5185\u5bb9\u662f\u8868\u8fbe\u4e00\u79cd\u613f\u671b\u548c\u4e00\u4e2a\u884c\u52a8\uff1a\u60f3\u611f\u53d7\u4e0d\u540c\u6587\u5316\u7684\u80cc\u666f\uff0c\u5e76\u4e14\u53bb\u770b\u4e00\u90e8\u5916\u56fd\u559c\u5267\u7535\u5f71\u3002\n\n\u73b0\u5728\uff0c\u6211\u9700\u8981\u5206\u89e3\u8fd9\u4e2a\u53e5\u5b50\uff0c\u627e\u51fa\u5176\u4e2d"}, {"input": "\u300a\u575b\u7ecf\u300b\uff0c\u662f\u5386\u53f2\u4e0a\u9664\u4f5b\u7ecf\u5916\uff0c\u552f\u4e00\u88ab\u5c0a\u79f0\u4e3a\u201c\u7ecf\u201d\u7684\u4f5b\u6559\u5178\u7c4d\u3002\u6b64\u4e66\u4f5c\u8005\u662f\nA. \u516d\u7956\u6167\u80fd\nB. \u5357\u5929\u7afa\u83e9\u63d0\u8fbe\u6469\nC. \u91ca\u8fe6\u725f\u5c3c\nD. 
\u4e94\u7956\u5b8f\u5fcd", "target": "\n\n\u8bf7\u4e00\u6b65\u6b65\u5206\u6790\uff0c\u627e\u51fa\u6b63\u786e\u7b54\u6848\n\n</think>\n\n\u300a\u575b\u7ecf\u300b\u662f\u4e2d\u56fd\u4f5b\u6559\u7684\u91cd\u8981\u7ecf\u5178\u4e4b\u4e00\uff0c\u5b83\u662f\u5386\u53f2\u4e0a\u7684\u552f\u4e00\u4e00\u90e8\u4e0d\u4ee5\u4f5b\u7ecf\u4e3a\u540d\u7684\u7ecf\u4e66\u3002\u6839\u636e\u6240\u5b66\u77e5\u8bc6\uff0c\u5b83\u662f\u7531\u5357\u5929\u7afa\u56fd\u7684\u83e9\u63d0\u8428\u57f5\u6240\u8457\u7684\u3002\u56e0\u6b64\uff0c\u6b63\u786e\u7b54\u6848\u662f\u9009\u9879B\u3002\n\n**\u7b54\u6848\uff1aB. \u5357\u5929\u7afa\u83e9\u63d0\u8fbe\u6469**"}]
mapping.json ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "RK3588": {
3
+ "rk3588-3npu-w8a8": {
4
+ "dataset": "./data_quant.json",
5
+ "qparams": null,
6
+ "optimization_level": 1,
7
+ "target_platform": "RK3588",
8
+ "num_npu_core": 3,
9
+ "quantized_dtype": "w8a8",
10
+ "quantized_algorithm": "normal",
11
+ "max_context": 4096
12
+ },
13
+ "rk3588-3npu-w8a8_g128": {
14
+ "dataset": "./data_quant.json",
15
+ "qparams": null,
16
+ "optimization_level": 1,
17
+ "target_platform": "RK3588",
18
+ "num_npu_core": 3,
19
+ "quantized_dtype": "w8a8_g128",
20
+ "quantized_algorithm": "normal",
21
+ "max_context": 4096
22
+ },
23
+ "rk3588-3npu-w8a8_g256": {
24
+ "dataset": "./data_quant.json",
25
+ "qparams": null,
26
+ "optimization_level": 1,
27
+ "target_platform": "RK3588",
28
+ "num_npu_core": 3,
29
+ "quantized_dtype": "w8a8_g256",
30
+ "quantized_algorithm": "normal",
31
+ "max_context": 4096
32
+ },
33
+ "rk3588-3npu-w8a8_g512": {
34
+ "dataset": "./data_quant.json",
35
+ "qparams": null,
36
+ "optimization_level": 1,
37
+ "target_platform": "RK3588",
38
+ "num_npu_core": 3,
39
+ "quantized_dtype": "w8a8_g512",
40
+ "quantized_algorithm": "normal",
41
+ "max_context": 4096
42
+ }
43
+ },
44
+ "RK3576": {
45
+ "rk3576-2npu-w8a8": {
46
+ "dataset": "./data_quant.json",
47
+ "qparams": null,
48
+ "optimization_level": 1,
49
+ "target_platform": "RK3576",
50
+ "num_npu_core": 2,
51
+ "quantized_dtype": "w8a8",
52
+ "quantized_algorithm": "normal",
53
+ "max_context": 4096
54
+ },
55
+ "rk3576-2npu-w4a16": {
56
+ "dataset": "./data_quant.json",
57
+ "qparams": null,
58
+ "optimization_level": 1,
59
+ "target_platform": "RK3576",
60
+ "num_npu_core": 2,
61
+ "quantized_dtype": "w4a16",
62
+ "quantized_algorithm": "grq",
63
+ "max_context": 4096
64
+ },
65
+ "rk3576-2npu-w4a16_g32": {
66
+ "dataset": "./data_quant.json",
67
+ "qparams": null,
68
+ "optimization_level": 1,
69
+ "target_platform": "RK3576",
70
+ "num_npu_core": 2,
71
+ "quantized_dtype": "w4a16_g32",
72
+ "quantized_algorithm": "grq",
73
+ "max_context": 4096
74
+ },
75
+ "rk3576-2npu-w4a16_g64": {
76
+ "dataset": "./data_quant.json",
77
+ "qparams": null,
78
+ "optimization_level": 1,
79
+ "target_platform": "RK3576",
80
+ "num_npu_core": 2,
81
+ "quantized_dtype": "w4a16_g64",
82
+ "quantized_algorithm": "grq",
83
+ "max_context": 4096
84
+ },
85
+ "rk3576-2npu-w4a16_g128": {
86
+ "dataset": "./data_quant.json",
87
+ "qparams": null,
88
+ "optimization_level": 1,
89
+ "target_platform": "RK3576",
90
+ "num_npu_core": 2,
91
+ "quantized_dtype": "w4a16_g128",
92
+ "quantized_algorithm": "grq",
93
+ "max_context": 4096
94
+ }
95
+ },
96
+ "RK3562": {
97
+ "rk3562-1npu-w8a8": {
98
+ "dataset": "./data_quant.json",
99
+ "qparams": null,
100
+ "optimization_level": 1,
101
+ "target_platform": "RK3562",
102
+ "quantized_dtype": "w8a8",
103
+ "quantized_algorithm": "normal",
104
+ "num_npu_core": 1,
105
+ "max_context": 4096
106
+ },
107
+ "rk3562-1npu-w4a16_g32": {
108
+ "dataset": "./data_quant.json",
109
+ "qparams": null,
110
+ "optimization_level": 1,
111
+ "target_platform": "RK3562",
112
+ "num_npu_core": 1,
113
+ "quantized_dtype": "w4a16_g32",
114
+ "quantized_algorithm": "grq",
115
+ "max_context": 4096
116
+ },
117
+ "rk3562-1npu-w4a16_g64": {
118
+ "dataset": "./data_quant.json",
119
+ "qparams": null,
120
+ "optimization_level": 1,
121
+ "target_platform": "RK3562",
122
+ "num_npu_core": 1,
123
+ "quantized_dtype": "w4a16_g64",
124
+ "quantized_algorithm": "grq",
125
+ "max_context": 4096
126
+ },
127
+ "rk3562-1npu-w4a16_g128": {
128
+ "dataset": "./data_quant.json",
129
+ "qparams": null,
130
+ "optimization_level": 1,
131
+ "target_platform": "RK3562",
132
+ "num_npu_core": 1,
133
+ "quantized_dtype": "w4a16_g128",
134
+ "quantized_algorithm": "grq",
135
+ "max_context": 4096
136
+ }
137
+ }
138
+ }
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio==5.34.2
2
+
3
+ https://github.com/airockchip/rknn-llm/raw/refs/tags/release-v1.2.1/rkllm-toolkit/rkllm_toolkit-1.2.1-cp311-cp311-linux_x86_64.whl
run.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
# NOTE: copy from gradio bin
import re
import sys
from gradio.cli import cli

if __name__ == '__main__':
    # Strip a trailing "-script.pyw" or ".exe" suffix from the program
    # name (argv[0]) before handing control to gradio's CLI entry point.
    program_name = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(cli())