# --- START OF FILE app.py ---
import os
import shutil
import subprocess
import torch
from transformers import AutoConfig, AutoModelForCausalLM
from huggingface_hub import HfApi, whoami, ModelCard, list_models
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from apscheduler.schedulers.background import BackgroundScheduler
from textwrap import dedent
import gradio as gr
import hashlib
import torch.nn.utils.prune as prune
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
import logging
from datetime import datetime
from typing import List, Dict

logging.basicConfig(level=logging.INFO)
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

HF_TOKEN = os.environ.get("HF_TOKEN")
SPACE_ID = "Ffftdtd5dtft/gguf-my-repo"  # Replace with your space ID if different
def generate_importance_matrix(model_path, train_data_path):
    # Runs llama.cpp's llama-imatrix tool from inside the llama.cpp checkout.
    os.chdir("llama.cpp")
    try:
        if not os.path.isfile(f"../{model_path}"):
            raise Exception(f"Model file not found: {model_path}")
        imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
        process = subprocess.Popen(imatrix_command, shell=True)
        try:
            process.wait(timeout=3600)
        except subprocess.TimeoutExpired:
            process.kill()
    finally:
        # Always return to the original working directory, even when the model file is missing.
        os.chdir("..")
def split_upload_model(model_path, repo_id, oauth_token, split_max_tensors=256, split_max_size=None):
    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You have to be logged in.")
    split_cmd = f"llama.cpp/llama-gguf-split --split --split-max-tensors {split_max_tensors}"
    if split_max_size:
        split_cmd += f" --split-max-size {split_max_size}"
    split_cmd += f" {model_path} {model_path.split('.')[0]}"
    result = subprocess.run(split_cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise Exception(f"Error splitting the model: {result.stderr}")
    sharded_model_files = [f for f in os.listdir('.') if f.startswith(model_path.split('.')[0])]
    if sharded_model_files:
        api = HfApi(token=oauth_token.token)
        for file in sharded_model_files:
            file_path = os.path.join('.', file)
            try:
                api.upload_file(path_or_fileobj=file_path, path_in_repo=file, repo_id=repo_id)
            except Exception as e:
                raise Exception(f"Error uploading file {file_path}: {e}")
    else:
        raise Exception("No sharded files found.")
def quantize_to_q1_with_min(tensor, min_value=-1):
    tensor = torch.sign(tensor)
    tensor[tensor < min_value] = min_value
    return tensor
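# Illustrative sketch only (nothing in this app calls it): sign quantization maps every
# weight to {-1, 0, 1} and then clamps anything below `min_value`, e.g.
# quantize_to_q1_with_min(torch.tensor([0.3, -0.7, 0.0])) -> tensor([ 1., -1.,  0.]).
def _example_sign_quantization():
    weights = torch.tensor([0.3, -0.7, 0.0, 2.5])
    return quantize_to_q1_with_min(weights, min_value=-1)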
def quantize_model_to_q1_with_min(model, min_value=-1):
    for name, param in model.named_parameters():
        if param.dtype in [torch.float32, torch.float16]:
            with torch.no_grad():
                param.copy_(quantize_to_q1_with_min(param.data, min_value))
    # Return the model so callers such as ultra_max_compress can keep chaining it.
    return model
def disable_unnecessary_components(model):
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Dropout):
            module.p = 0.0
        elif isinstance(module, torch.nn.BatchNorm1d):
            module.eval()
def ultra_max_compress(model):
    model = quantize_model_to_q1_with_min(model, min_value=-0.05)
    disable_unnecessary_components(model)
    with torch.no_grad():
        for name, param in model.named_parameters():
            if param.requires_grad:
                param.requires_grad = False
            param.data = torch.nn.functional.hardtanh(param.data, min_val=-1.0, max_val=1.0)
            param.data = param.data.half()
    model.eval()
    # Drop empty buffers. Collect names first so the buffer dict is not mutated while iterating,
    # and resolve the owning submodule because named_buffers() yields dotted names.
    empty_buffers = [name for name, buf in model.named_buffers() if buf.numel() == 0]
    for full_name in empty_buffers:
        module_path, _, buffer_name = full_name.rpartition('.')
        owner = model.get_submodule(module_path) if module_path else model
        owner._buffers.pop(buffer_name, None)
    return model
def optimize_model_resources(model):
    torch.set_grad_enabled(False)
    model.eval()
    for name, param in model.named_parameters():
        param.requires_grad = False
        if param.dtype == torch.float32:
            param.data = param.data.half()
    if hasattr(model, 'config'):
        if hasattr(model.config, 'max_position_embeddings'):
            model.config.max_position_embeddings = min(model.config.max_position_embeddings, 512)
        if hasattr(model.config, 'hidden_size'):
            model.config.hidden_size = min(model.config.hidden_size, 768)
    return model
def aggressive_optimize(model, reduce_layers_factor=0.5):
    if hasattr(model.config, 'num_attention_heads'):
        model.config.num_attention_heads = int(model.config.num_attention_heads * reduce_layers_factor)
    if hasattr(model.config, 'hidden_size'):
        model.config.hidden_size = int(model.config.hidden_size * reduce_layers_factor)
    return model
def apply_quantization(model, use_int8_inference):
    if use_int8_inference:
        quantized_model = torch.quantization.quantize_dynamic(
            model, {torch.nn.Linear}, dtype=torch.qint8
        )
        return quantized_model
    else:
        return model
def reduce_layers(model, reduction_factor=0.5):
    if hasattr(model, 'transformer') and hasattr(model.transformer, 'h'):
        original_num_layers = len(model.transformer.h)
        new_num_layers = int(original_num_layers * reduction_factor)
        model.transformer.h = torch.nn.ModuleList(model.transformer.h[:new_num_layers])
    return model
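# Illustrative note: with the default reduction_factor of 0.5, a GPT-2 style model that
# exposes its blocks as model.transformer.h keeps only the first half of its layers
# (e.g. 24 blocks -> 12 blocks); models without that attribute are returned unchanged.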
def use_smaller_embeddings(model, reduction_factor=0.75):
    if hasattr(model, 'config'):
        original_embedding_dim = model.config.hidden_size
        new_embedding_dim = int(original_embedding_dim * reduction_factor)
        model.config.hidden_size = new_embedding_dim
        if hasattr(model, 'resize_token_embeddings'):
            model.resize_token_embeddings(int(model.config.vocab_size * reduction_factor))
    return model
def use_fp16_embeddings(model):
    if hasattr(model, 'transformer') and hasattr(model.transformer, 'wte'):
        model.transformer.wte = model.transformer.wte.half()
    return model
def quantize_embeddings(model):
    if hasattr(model, 'transformer') and hasattr(model.transformer, 'wte'):
        # nn.Embedding only supports weight-only quantization, so pass the dedicated
        # qconfig instead of the plain qint8 dtype (which raises for embeddings).
        model.transformer.wte = torch.quantization.quantize_dynamic(
            model.transformer.wte,
            {torch.nn.Embedding: torch.quantization.float_qparams_weight_only_qconfig},
        )
    return model
def use_bnb_f16(model):
    if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
        model = model.to(dtype=torch.bfloat16)
    return model
def use_group_quantization(model):
    # fuse_modules() operates on submodule names (e.g. ['conv', 'bn']), not parameter names,
    # so the original ['weight'] fusion call is dropped; dynamic int8 quantization of the
    # Linear layers is kept.
    for module in model.modules():
        if isinstance(module, torch.nn.Linear):
            torch.quantization.quantize_dynamic(module, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
    return model
def apply_layer_norm_trick(model):
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.LayerNorm):
            module.elementwise_affine = False
    return model
def remove_padding(inputs, attention_mask):
    last_non_padded = attention_mask.sum(dim=1) - 1
    gathered_inputs = torch.gather(inputs, dim=1, index=last_non_padded.unsqueeze(1).unsqueeze(2).expand(-1, -1, inputs.size(2)))
    return gathered_inputs
def use_selective_quantization(model):
    for module in model.modules():
        if isinstance(module, torch.nn.MultiheadAttention):
            torch.quantization.quantize_dynamic(module, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
    return model
def use_mixed_precision(model):
    if hasattr(model, 'transformer') and hasattr(model.transformer, 'wte'):
        model.transformer.wte = model.transformer.wte.half()
    return model
def use_pruning_after_training(model, prune_amount=0.1):
    from torch import nn
    for name, module in model.named_modules():
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            prune.l1_unstructured(module, name='weight', amount=prune_amount)
            prune.remove(module, 'weight')
    return model
def use_knowledge_distillation(model, teacher_model, temperature=2.0, alpha=0.5):
    teacher_model.eval()
    criterion = torch.nn.KLDivLoss(reduction='batchmean')
    def distillation_loss(student_logits, teacher_logits):
        student_probs = F.log_softmax(student_logits / temperature, dim=-1)
        teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
        return criterion(student_probs, teacher_probs) * (temperature ** 2)
    def train_step(inputs, labels):
        student_outputs = model(**inputs, labels=labels)
        student_logits = student_outputs.logits
        with torch.no_grad():
            teacher_outputs = teacher_model(**inputs)
            teacher_logits = teacher_outputs.logits
        loss = alpha * student_outputs.loss + (1 - alpha) * distillation_loss(student_logits, teacher_logits)
        return loss
    return train_step
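# Hedged usage sketch (never called by this app; the student, teacher, optimizer and the
# iterable of tokenized (inputs, labels) batches are all assumed to be provided elsewhere):
# the closure returned by use_knowledge_distillation is meant to be invoked once per batch.
def _example_distillation_loop(student, teacher, optimizer, batches):
    train_step = use_knowledge_distillation(student, teacher, temperature=2.0, alpha=0.5)
    for inputs, labels in batches:
        loss = train_step(inputs, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()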
def use_weight_sharing(model):
    if hasattr(model, 'transformer') and hasattr(model.transformer, 'h'):
        if len(model.transformer.h) > 1:
            model.transformer.h[-1].load_state_dict(model.transformer.h[0].state_dict())
    return model
def use_low_rank_approximation(model, rank_factor=0.5):
    for module in model.modules():
        if isinstance(module, torch.nn.Linear):
            original_weight = module.weight.data
            U, S, Vh = torch.linalg.svd(original_weight)
            rank = int(S.size(0) * rank_factor)
            module.weight.data = U[:, :rank] @ torch.diag(S[:rank]) @ Vh[:rank, :]
    return model
def use_hashing_trick(model, num_hashes=1024):
    def hash_features(features):
        features_bytes = features.cpu().numpy().tobytes()
        hash_object = hashlib.sha256(features_bytes)
        hash_value = hash_object.hexdigest()
        hashed_features = int(hash_value, 16) % num_hashes
        return torch.tensor(hashed_features, device=features.device)
    original_forward = model.forward
    def forward(*args, **kwargs):
        inputs = args[0]
        hashed_inputs = hash_features(inputs)
        return original_forward(hashed_inputs, *args[1:], **kwargs)
    # The original version built the wrapper but never attached or returned it.
    model.forward = forward
    return model
def use_quantization_aware_training(model):
    model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
    # prepare_qat requires the model to be in training mode; a real QAT run would also
    # fine-tune between prepare and convert instead of converting immediately.
    model.train()
    torch.quantization.prepare_qat(model, inplace=True)
    model.eval()
    torch.quantization.convert(model, inplace=True)
    return model
def use_gradient_checkpointing(model):
    def custom_forward(*inputs):
        return checkpoint(model, *inputs)
    model.forward = custom_forward
    return model
def use_channel_pruning(model, prune_amount=0.1):
    from torch import nn
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            prune.ln_structured(module, name="weight", amount=prune_amount, n=2, dim=0)
            prune.remove(module, 'weight')
    return model
def use_sparse_tensors(model, sparsity_threshold=0.01):
    for name, param in model.named_parameters():
        if param.dim() >= 2 and param.is_floating_point():
            sparse_param = param.to_sparse()
            sparse_param._values()[sparse_param._values().abs() < sparsity_threshold] = 0
            param.data = sparse_param.to_dense()
    return model
def use_lora(model, r=8, lora_alpha=16, lora_dropout=0.05, target_modules=None):
    from peft import LoraConfig, get_peft_model
    config = LoraConfig(
        r=r,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        target_modules=target_modules if target_modules else ["q_proj", "v_proj"],  # Example target modules
        bias="none",
        task_type="CAUSAL_LM"
    )
    model = get_peft_model(model, config)
    return model
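# Note (assumption, not something this app currently does): a PEFT-wrapped model keeps its
# LoRA weights in separate adapter modules, so before exporting such a model to GGUF the
# adapters would normally be folded back into the base weights, e.g. via peft's
# merge_and_unload(). Illustrative sketch:
def _example_merge_lora_before_export(peft_model):
    # Returns the plain base model with the LoRA deltas merged into its Linear weights.
    return peft_model.merge_and_unload()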
def use_adalora(model, target_r=8, init_r=12, beta1=0.85, beta2=0.99, **kwargs):
    from peft import AdaLoraConfig, get_peft_model
    # The original call passed tmask_init/loha, which AdaLoraConfig does not accept;
    # only recognised fields are forwarded here.
    config = AdaLoraConfig(
        target_r=target_r,
        init_r=init_r,
        beta1=beta1,
        beta2=beta2,
        task_type="CAUSAL_LM",
        **kwargs
    )
    model = get_peft_model(model, config)
    return model
def use_ia3(model, target_modules=None):
    from peft import IA3Config, get_peft_model
    targets = target_modules if target_modules else ["k_proj", "v_proj", "down_proj"]  # Example target modules
    config = IA3Config(
        target_modules=targets,
        # down_proj is an MLP projection, so it is declared as a feed-forward module
        # (IA3 scales feed-forward inputs rather than outputs).
        feedforward_modules=[m for m in targets if m == "down_proj"],
        task_type="CAUSAL_LM"
    )
    model = get_peft_model(model, config)
    return model
def use_prompt_tuning(model, num_virtual_tokens=8, prompt_tuning_init_text="You are a helpful assistant."):
    from peft import PromptTuningConfig, PromptTuningInit, get_peft_model, TaskType
    config = PromptTuningConfig(
        task_type=TaskType.CAUSAL_LM,
        num_virtual_tokens=num_virtual_tokens,
        # TEXT initialization needs a tokenizer; reuse the checkpoint the model was loaded from.
        prompt_tuning_init=PromptTuningInit.TEXT,
        prompt_tuning_init_text=prompt_tuning_init_text,
        tokenizer_name_or_path=getattr(model.config, "_name_or_path", None)
    )
    model = get_peft_model(model, config)
    return model
def apply_moe_layer_splitting(model, num_experts: int = 4, expert_capacity_factor: float = 2.0, moe_layer_freq: int = 2):
    # Best-effort: swaps every Nth block for a Mixtral-style sparse MoE block. This assumes a
    # GPT-2 style `transformer.h` stack whose blocks are Mixtral decoder layers; anything else
    # is skipped with a warning. expert_capacity_factor is accepted for interface compatibility,
    # but MixtralSparseMoeBlock does not take a capacity factor.
    if not hasattr(model, 'transformer') or not hasattr(model.transformer, 'h'):
        logging.warning("Model does not have the expected transformer structure for MoE splitting.")
        return model
    from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock, MixtralDecoderLayer
    for i in range(len(model.transformer.h)):
        if (i + 1) % moe_layer_freq == 0:
            original_layer = model.transformer.h[i]
            if isinstance(original_layer, MixtralDecoderLayer):
                config = model.config
                config.num_local_experts = num_experts
                new_moe_block = MixtralSparseMoeBlock(config)
                # Copy whatever overlapping weights exist from the original feed-forward block.
                new_moe_block.load_state_dict(original_layer.block_sparse_moe.state_dict(), strict=False)
                model.transformer.h[i] = new_moe_block
            else:
                logging.warning(f"Skipping layer {i} for MoE, not a recognized block type.")
    return model
def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size,
                  oauth_token: gr.OAuthToken | None, apply_aggressive_optimization, apply_reduce_layers, apply_smaller_embeddings,
                  apply_weight_sharing, apply_low_rank_approx, use_lora_opt, use_adalora_opt, use_ia3_opt, use_prompt_tuning_opt,
                  apply_moe_splitting, num_experts_moe, expert_capacity_factor_moe, moe_layer_freq_moe,
                  is_automated=False):
    logged_in = oauth_token is not None and oauth_token.token is not None
    if not logged_in and not is_automated:
        raise ValueError("You must be logged in to use GGUF-my-repo")
    elif not logged_in and is_automated:
        logging.warning("Running in automated mode without user authentication.")
    model_name = model_id.split('/')[-1]
    fp16 = f"{model_name}.fp16.gguf"
    # Defined before the try block so the cleanup in `finally` can always reference it.
    optimized_model_path = f"{model_name}_optimized"
    try:
        api = HfApi(token=oauth_token.token if oauth_token else None)
        dl_pattern = ["*.safetensors", "*.bin", "*.pt", "*.onnx", "*.h5", "*.tflite", "*.ckpt", "*.pb", "*.tar", "*.xml", "*.caffemodel", "*.md", "*.json", "*.model"]
        pattern = "*.safetensors" if any(file.path.endswith(".safetensors") for file in api.list_repo_tree(repo_id=model_id, recursive=True)) else "*.bin"
        # `+= pattern` would extend the list with single characters; append the pattern string instead.
        dl_pattern.append(pattern)
        api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
        config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, config=config, torch_dtype=torch.float16, trust_remote_code=True)
        if apply_aggressive_optimization:
            model = aggressive_optimize(model)
        if apply_reduce_layers:
            model = reduce_layers(model)
        if apply_smaller_embeddings:
            model = use_smaller_embeddings(model)
        if apply_weight_sharing:
            model = use_weight_sharing(model)
        if apply_low_rank_approx:
            model = use_low_rank_approximation(model)
        if use_lora_opt:
            model = use_lora(model)
        if use_adalora_opt:
            model = use_adalora(model)
        if use_ia3_opt:
            model = use_ia3(model)
        if use_prompt_tuning_opt:
            model = use_prompt_tuning(model)
        if apply_moe_splitting:
            model = apply_moe_layer_splitting(model, num_experts_moe, expert_capacity_factor_moe, moe_layer_freq_moe)
        model.save_pretrained(optimized_model_path)
        conversion_script = "convert_hf_to_gguf.py"
        fp16_conversion = f"python llama.cpp/{conversion_script} {optimized_model_path} --outtype f16 --outfile {fp16}"
        result = subprocess.run(fp16_conversion, shell=True, capture_output=True, text=True)
        if result.returncode != 0:
            raise Exception(f"Error converting to fp16: {result.stderr}")
        imatrix_path = "llama.cpp/imatrix.dat"
        if use_imatrix:
            if train_data_file:
                train_data_path = train_data_file.name
            else:
                train_data_path = "groups_merged.txt"
            if not os.path.isfile(train_data_path):
                raise Exception(f"Training data file not found: {train_data_path}")
            generate_importance_matrix(fp16, train_data_path)
        username = whoami(oauth_token.token)["name"] if oauth_token and oauth_token.token else "automated-gguf"
        quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf"
        quantized_gguf_path = quantized_gguf_name
        if use_imatrix:
            quantise_ggml = f"./llama.cpp/llama-quantize --imatrix {imatrix_path} {fp16} {quantized_gguf_path} {imatrix_q_method}"
        else:
            quantise_ggml = f"./llama.cpp/llama-quantize {fp16} {quantized_gguf_path} {q_method}"
        result = subprocess.run(quantise_ggml, shell=True, capture_output=True, text=True)
        if result.returncode != 0:
            raise Exception(f"Error quantizing: {result.stderr}")
        try:
            # Sanity-check the quantized file with llama-cli (matches the llama.cpp binary names used above).
            subprocess.run(["llama.cpp/llama-cli", "-m", quantized_gguf_path, "-p", "Test prompt"], check=True)
        except Exception as e:
            raise Exception(f"Model verification failed: {e}")
        new_repo_id = f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF"
        new_repo_url = api.create_repo(repo_id=new_repo_id, exist_ok=True, private=private_repo)
        try:
            card = ModelCard.load(model_id, token=oauth_token.token if oauth_token else None)
        except Exception:
            card = ModelCard("")
        if card.data.tags is None:
            card.data.tags = []
        card.data.tags.append("llama-cpp")
        card.data.tags.append("gguf-my-repo")
        card.data.base_model = model_id
        optimization_notes = []
        if apply_aggressive_optimization:
            optimization_notes.append("Aggressive optimization applied.")
        if apply_reduce_layers:
            optimization_notes.append("Number of layers reduced.")
        if apply_smaller_embeddings:
            optimization_notes.append("Embedding size reduced.")
        if apply_weight_sharing:
            optimization_notes.append("Weight sharing applied.")
        if apply_low_rank_approx:
            optimization_notes.append("Low-rank approximation applied.")
        if use_lora_opt:
            optimization_notes.append("LoRA applied.")
        if use_adalora_opt:
            optimization_notes.append("AdaLoRA applied.")
        if use_ia3_opt:
            optimization_notes.append("IA3 applied.")
        if use_prompt_tuning_opt:
            optimization_notes.append("Prompt Tuning applied.")
        if apply_moe_splitting:
            optimization_notes.append(f"Mixture-of-Experts (MoE) layer splitting applied with {num_experts_moe} experts every {moe_layer_freq_moe} layers.")
        card.text = dedent(
            f"""
            # {new_repo_id}
            This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
            Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
            {' '.join(optimization_notes)}

            ## Use with llama.cpp
            Install llama.cpp through brew (works on Mac and Linux)
            ```bash
            brew install llama.cpp
            ```
            Invoke the llama.cpp server or the CLI.

            ### CLI:
            ```bash
            llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
            ```

            ### Server:
            ```bash
            llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
            ```

            Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.

            Step 1: Clone llama.cpp from GitHub.
            ```
            git clone https://github.com/ggerganov/llama.cpp
            ```

            Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, LLAMA_CUDA=1 for Nvidia GPUs on Linux).
            ```
            cd llama.cpp && LLAMA_CURL=1 make
            ```

            Step 3: Run inference through the main binary.
            ```
            ./llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
            ```
            or
            ```
            ./llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
            ```
            """
        )
card.save(f"README.md") | |
if split_model: | |
split_upload_model(quantized_gguf_path, new_repo_id, oauth_token, split_max_tensors, split_max_size) | |
else: | |
try: | |
api.upload_file(path_or_fileobj=quantized_gguf_path, path_in_repo=quantized_gguf_name, repo_id=new_repo_id) | |
except Exception as e: | |
raise Exception(f"Error uploading quantized model: {e}") | |
if os.path.isfile(imatrix_path): | |
try: | |
api.upload_file(path_or_fileobj=imatrix_path, path_in_repo="imatrix.dat", repo_id=new_repo_id) | |
except Exception as e: | |
raise Exception(f"Error uploading imatrix.dat: {e}") | |
api.upload_file(path_or_fileobj=f"README.md", path_in_repo=f"README.md", repo_id=new_repo_id) | |
log_message = f"Successfully processed and uploaded GGUF model for {model_id} to {new_repo_url}" | |
logging.info(log_message) | |
return (f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>', "llama.png") | |
except Exception as e: | |
error_message = f"Error processing model {model_id}: {e}" | |
logging.error(error_message) | |
return (f"Error: {e}", "error.png") | |
finally: | |
shutil.rmtree(model_name, ignore_errors=True) | |
shutil.rmtree(optimized_model_path, ignore_errors=True) | |
def select_models_for_automation():
    # Example logic: select the top N most downloaded models.
    models = list_models(sort="downloads", direction=-1, limit=5)
    return [model.id for model in models]
def get_automation_parameters():
    # Example logic: define default parameters (could also be loaded from a config file).
    return {
        "q_method": "Q4_K_M",
        "use_imatrix": False,
        "imatrix_q_method": "IQ4_NL",
        "private_repo": True,
        "train_data_file": None,
        "split_model": False,
        "split_max_tensors": 256,
        "split_max_size": None,
        "apply_aggressive_optimization": True,
        "apply_reduce_layers": True,
        "apply_smaller_embeddings": True,
        "apply_weight_sharing": False,
        "apply_low_rank_approx": False,
        "use_lora_opt": False,
        "use_adalora_opt": False,
        "use_ia3_opt": False,
        "use_prompt_tuning_opt": False,
        "apply_moe_splitting": False,
        "num_experts_moe": 4,
        "expert_capacity_factor_moe": 2.0,
        "moe_layer_freq_moe": 2,
    }
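# Hedged example (purely illustrative; nothing in this app calls it): the automation defaults
# above can be overridden per run by merging a partial dict on top of them.
def _example_override_automation_params(**overrides):
    params = get_automation_parameters()
    params.update(overrides)
    return params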
def automate_gguf_creation():
    logging.info(f"Starting automated GGUF creation at {datetime.now()}")
    api = HfApi(token=HF_TOKEN)
    try:
        whoami(token=HF_TOKEN)  # Check that the token is valid
    except Exception as e:
        logging.error(f"Error with Hugging Face token: {e}")
        return
    models_to_process = select_models_for_automation()
    automation_params = get_automation_parameters()
    for model_id in models_to_process:
        logging.info(f"Attempting to process model: {model_id}")
        try:
            process_model(model_id=model_id, oauth_token=None, is_automated=True, **automation_params)
        except Exception as e:
            logging.error(f"Failed to process model {model_id} automatically: {e}")
css="""/* Custom CSS to allow scrolling */ .gradio-container {overflow-y: auto;}""" | |
with gr.Blocks(css=css) as demo:
    gr.Markdown("You must be logged in to use GGUF-my-repo for manual processing. Automation runs in the background.")
    # gr.LoginButton provides Hugging Face OAuth; the token itself is injected into process_model
    # through its `oauth_token: gr.OAuthToken | None` annotation rather than as an input component.
    gr.LoginButton(min_width=250)
    model_id = HuggingfaceHubSearch(label="Hub Model ID", placeholder="Search for model id on Huggingface", search_type="model")
    q_method = gr.Dropdown(["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
                           label="Quantization Method", info="GGML quantization type", value="Q4_K_M", filterable=False, visible=True)
    imatrix_q_method = gr.Dropdown(["IQ1", "IQ1_S", "IQ1_XXS", "IQ2_S", "IQ2_XXS", "IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"],
                                   label="Imatrix Quantization Method", info="GGML imatrix quants type", value="IQ4_NL", filterable=False, visible=False)
    use_imatrix = gr.Checkbox(value=False, label="Use Imatrix Quantization", info="Use importance matrix for quantization.")
    train_data_file = gr.File(label="Training Data File", file_types=["txt"], visible=False)
    size_reduction_accordion = gr.Accordion("Additional Size Reduction Techniques", open=False)
    with size_reduction_accordion:
        apply_aggressive_optimization = gr.Checkbox(value=True, label="Apply Aggressive Optimization", info="Reduces attention heads and hidden size.")
        apply_reduce_layers = gr.Checkbox(value=True, label="Reduce Layers", info="Reduces the number of layers in the model.")
        apply_smaller_embeddings = gr.Checkbox(value=True, label="Use Smaller Embeddings", info="Reduces the size of the embedding layer.")
        apply_weight_sharing = gr.Checkbox(value=False, label="Apply Weight Sharing", info="Shares weights across layers to reduce parameters.")
        apply_low_rank_approx = gr.Checkbox(value=False, label="Apply Low-Rank Approximation", info="Approximates weight matrices with lower rank.")
        use_lora_opt = gr.Checkbox(value=False, label="Use LoRA", info="Applies Low-Rank Adaptation.")
        use_adalora_opt = gr.Checkbox(value=False, label="Use AdaLoRA", info="Applies Adaptive Low-Rank Adaptation.")
        use_ia3_opt = gr.Checkbox(value=False, label="Use IA3", info="Applies Infused Adapter by Inhibiting and Amplifying Inner Activations.")
        use_prompt_tuning_opt = gr.Checkbox(value=False, label="Use Prompt Tuning", info="Adds trainable virtual tokens to the input embeddings.")
    apply_moe_splitting = gr.Checkbox(value=False, label="Apply MoE Layer Splitting", info="Splits layers into a mixture-of-experts (MoE).", visible=False)
    with gr.Row(visible=False) as moe_params:
        num_experts_moe = gr.Number(value=4, label="Number of Experts", info="Number of experts to use in the MoE layers.", precision=0)
        expert_capacity_factor_moe = gr.Number(value=2.0, label="Expert Capacity Factor", info="Capacity factor for each expert in the MoE layer.", precision=1)
        moe_layer_freq_moe = gr.Number(value=2, label="MoE Layer Frequency", info="Apply MoE every N layers", precision=0)
    private_repo = gr.Checkbox(value=True, label="Private Repo", info="Create a private repo under your username.")
    split_model = gr.Checkbox(value=False, label="Split Model", info="Shard the model using gguf-split.")
    split_max_tensors = gr.Number(value=256, label="Max Tensors per File", info="Maximum number of tensors per file when splitting model.", visible=False)
    split_max_size = gr.Textbox(label="Max File Size", info="Maximum file size when splitting model (--split-max-size). May leave empty to use the default.", visible=False)
    use_imatrix.change(fn=lambda use_imatrix: gr.update(visible=not use_imatrix), inputs=use_imatrix, outputs=q_method)
    use_imatrix.change(fn=lambda use_imatrix: gr.update(visible=use_imatrix), inputs=use_imatrix, outputs=imatrix_q_method)
    use_imatrix.change(fn=lambda use_imatrix: gr.update(visible=use_imatrix), inputs=use_imatrix, outputs=train_data_file)
    split_model.change(fn=lambda split_model: gr.update(visible=split_model), inputs=split_model, outputs=split_max_tensors)
    split_model.change(fn=lambda split_model: gr.update(visible=split_model), inputs=split_model, outputs=split_max_size)
    apply_moe_splitting.change(fn=lambda apply_moe_splitting: gr.update(visible=apply_moe_splitting), inputs=apply_moe_splitting, outputs=moe_params)
    iface = gr.Interface(
        fn=process_model,
        inputs=[
            model_id,
            q_method,
            use_imatrix,
            imatrix_q_method,
            private_repo,
            train_data_file,
            split_model,
            split_max_tensors,
            split_max_size,
            # oauth_token is intentionally not listed here: Gradio injects it via the
            # gr.OAuthToken annotation on process_model.
            apply_aggressive_optimization,
            apply_reduce_layers,
            apply_smaller_embeddings,
            apply_weight_sharing,
            apply_low_rank_approx,
            use_lora_opt,
            use_adalora_opt,
            use_ia3_opt,
            use_prompt_tuning_opt,
            apply_moe_splitting,
            num_experts_moe,
            expert_capacity_factor_moe,
            moe_layer_freq_moe,
        ],
        outputs=[
            gr.Markdown(label="output"),
            gr.Image(show_label=False),
        ],
        title="Create your own GGUF Quants, blazingly fast ⚡!",
        description="This space takes an HF repo as input, applies the selected size-reduction techniques, quantizes the model, and creates a public or private repo containing the chosen quant under your HF user namespace. It also automates GGUF quant creation for popular models in the background.",
        api_name=False
    )
def restart_space():
    HfApi().restart_space(repo_id=SPACE_ID, token=HF_TOKEN, factory_reboot=True)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)
scheduler.add_job(automate_gguf_creation, "interval", hours=6)  # Run automation every 6 hours
scheduler.start()

demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)