import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
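# A minimal non-streaming sanity check (a sketch; the model above is the
# Space's default). Handy for verifying the endpoint before wiring up the UI:
#
#   out = client.chat_completion(
#       [{"role": "user", "content": "Hello!"}], max_tokens=32
#   )
#   print(out.choices[0].message.content)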
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]

    # Replay the conversation history as alternating user/assistant turns.
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream tokens from the endpoint, yielding the growing response so the
    # UI updates incrementally. (Loop variable renamed so it no longer
    # shadows the `message` argument.)
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # The final stream event can carry a None delta; skip it.
        if token:
            response += token
        yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()
# Fine-Tuning GPT-2 on Hugging Face Spaces (Streaming 40GB Dataset, No Storage Issues)
# Install required libraries (`!pip` is notebook syntax; drop the `!` in a shell)
!pip install transformers datasets peft accelerate bitsandbytes torch torchvision torchaudio gradio -q
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
import torch
import gradio as gr  # used by the demo at the bottom of this notebook
# Authenticate Hugging Face
from huggingface_hub import notebook_login
notebook_login()
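# notebook_login() only works inside a notebook. In a plain script or a Space,
# a token-based login is the usual alternative (HF_TOKEN is an assumed env var):
#
#   import os
#   from huggingface_hub import login
#   login(token=os.environ["HF_TOKEN"])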
# Load GPT-2 model and tokenizer
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# GPT-2 has no pad token; reuse EOS so padding="max_length" below works.
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name)
# Load a 5% slice of OpenWebText. Note: a slice split is downloaded, not
# streamed; see the streaming sketch below for the zero-download variant.
dataset = load_dataset("Skylion007/openwebtext", split="train[:5%]")
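# True streaming variant (a sketch, matching the "no storage issues" title):
# nothing is downloaded up front, but an IterableDataset has no length, so the
# TrainingArguments below would need max_steps instead of num_train_epochs.
#
#   streamed = load_dataset("Skylion007/openwebtext", split="train", streaming=True)
#   streamed = streamed.shuffle(seed=42, buffer_size=10_000)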
# Tokenization function
def tokenize_function(examples):
    return tokenizer(examples["text"], truncation=True, padding="max_length", max_length=512)

tokenized_datasets = dataset.map(tokenize_function, batched=True)
# Apply LoRA for efficient fine-tuning
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",                # causal-LM task, so labels flow through the PEFT wrapper
    target_modules=["c_attn", "c_proj"],  # apply LoRA to GPT-2's attention layers
)
model = get_peft_model(model, lora_config)
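# Sanity check: LoRA should leave only a small fraction of weights trainable
# (well under 1% of GPT-2's ~124M parameters).
model.print_trainable_parameters()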
# Enable gradient checkpointing to reduce memory usage. With LoRA the base
# weights are frozen, so the inputs must require grads for checkpointed
# activations to keep a backprop path.
model.enable_input_require_grads()
model.gradient_checkpointing_enable()
# Training arguments
training_args = TrainingArguments(
    output_dir="gpt2_finetuned",
    auto_find_batch_size=True,
    gradient_accumulation_steps=4,
    learning_rate=5e-5,
    num_train_epochs=3,
    save_strategy="epoch",
    logging_dir="logs",
    bf16=True,  # requires an Ampere-or-newer GPU; use fp16=True on older cards
    push_to_hub=True,
)
# Trainer setup. The collator copies input_ids into labels for causal LM;
# without it the Trainer receives no labels and cannot compute a loss.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets,
    data_collator=data_collator,
)
# Start fine-tuning
trainer.train()
# Save and push the model to Hugging Face Hub
trainer.save_model("gpt2_finetuned")
tokenizer.save_pretrained("gpt2_finetuned")
trainer.push_to_hub()
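# To reuse the adapter later, attach the saved LoRA weights to a fresh base
# model (a sketch; assumes the local "gpt2_finetuned" directory saved above):
#
#   from peft import PeftModel
#   base = AutoModelForCausalLM.from_pretrained("gpt2")
#   finetuned = PeftModel.from_pretrained(base, "gpt2_finetuned")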
# Deploy as a Gradio interface
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_length=100, pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
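# Greedy decoding (the default above) tends to repeat itself; sampling is often
# nicer for a demo. A sketch with illustrative values:
#
#   outputs = model.generate(
#       **inputs, max_length=100, do_sample=True,
#       temperature=0.7, top_p=0.95,
#       pad_token_id=tokenizer.eos_token_id,
#   )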
demo = gr.Interface(fn=generate_response, inputs="text", outputs="text")
demo.launch()