Faizal2805 committed
Commit 2e415e2 · verified · 1 Parent(s): 08c8c12

Update app.py

Files changed (1):
  1. app.py +28 -30
app.py CHANGED
@@ -4,28 +4,20 @@ from huggingface_hub import InferenceClient
  # Initialize Hugging Face Inference Client
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

- # Response Function (Now Compatible with 'type=messages')
- def respond(
-     message,
-     history,
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     # Correct structure for Gradio's 'messages' format
+ # Response Function
+ def respond(message, history, system_message, max_tokens, temperature, top_p):
+     # Ensure correct message structure
      messages = [{"role": "system", "content": system_message}]

-     # Ensure 'history' is handled as a list of dicts
      if isinstance(history, list):
          for entry in history:
              if isinstance(entry, dict):
-                 messages.append(entry)  # Correct format already
+                 messages.append(entry)
              elif isinstance(entry, tuple) and len(entry) == 2:
                  messages.append({"role": "user", "content": entry[0]})
                  messages.append({"role": "assistant", "content": entry[1]})

-     # Add current user message
+     # Append user message
      messages.append({"role": "user", "content": message})

      # Initialize response
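The body of respond() between "# Initialize response" and "yield response" is unchanged and not shown in this hunk. A minimal sketch of the streaming loop such a handler typically contains, assuming huggingface_hub's chat_completion API rather than the committed code:

    # Sketch (assumed, not part of this commit): stream tokens and yield partial text
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response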
@@ -44,13 +36,21 @@ def respond(
      yield response


- # Launch the Gradio app
- if __name__ == "__main__":
-     demo.launch()
+ # Gradio Chat Interface
+ demo = gr.ChatInterface(
+     respond,
+     chatbot=gr.Chatbot(type="messages"),
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
+     ],
+ )

- # Fine-Tuning GPT-2 on Hugging Face Spaces (Streaming 40GB Dataset, No Storage Issues)
+ # Fine-Tuning GPT-2 on Hugging Face Spaces
  from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
- from datasets import load_dataset
+ from datasets import Dataset
  from peft import LoraConfig, get_peft_model
  import torch

@@ -65,20 +65,17 @@ model = AutoModelForCausalLM.from_pretrained(model_name)

  # Custom Dataset (Predefined Q&A Pairs for Project Expo)
  custom_data = [
-     {"prompt": "Who are you?", "response": "I am Eva, a virtual voice assistant."},
-     {"prompt": "What is your name?", "response": "I am Eva, how can I help you?"},
-     {"prompt": "What can you do?", "response": "I can assist with answering questions, searching the web, and much more!"},
-     {"prompt": "Who invented the computer?", "response": "Charles Babbage is known as the father of the computer."},
-     {"prompt": "Tell me a joke.", "response": "Why don’t scientists trust atoms? Because they make up everything!"},
-     {"prompt": "Who is the Prime Minister of India?", "response": "The current Prime Minister of India is Narendra Modi."},
-     {"prompt": "Who created you?", "response": "I was created by an expert team specializing in AI fine-tuning and web development."}
+     {"text": "Who are you?", "label": "I am Eva, a virtual voice assistant."},
+     {"text": "What is your name?", "label": "I am Eva, how can I help you?"},
+     {"text": "What can you do?", "label": "I can assist with answering questions, searching the web, and much more!"},
  ]

  # Convert custom dataset to Hugging Face Dataset
- dataset_custom = load_dataset("json", data_files={"train": custom_data})
+ dataset_custom = Dataset.from_dict({"text": [d['text'] for d in custom_data],
+                                     "label": [d['label'] for d in custom_data]})

- # Load OpenWebText dataset (5% portion to avoid streaming issues)
- dataset = load_dataset("Skylion007/openwebtext", split="train[:20%]")
+ # Use the custom dataset for training (80/20 train/test split)
+ dataset = dataset_custom.train_test_split(test_size=0.2)['train']

  # Tokenization function
  def tokenize_function(examples):
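The body of tokenize_function lies outside this hunk. A plausible sketch for GPT-2 causal-LM fine-tuning on the new "text" column (an assumption, not the committed code); gpt2 ships without a pad token, so the EOS token is commonly reused:

    # Sketch (assumed): tokenize the "text" column and mirror input_ids as labels
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        tokens = tokenizer(
            examples["text"],
            truncation=True,
            padding="max_length",
            max_length=128,
        )
        tokens["labels"] = tokens["input_ids"].copy()
        return tokens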
@@ -89,11 +86,11 @@ tokenized_datasets = dataset.map(tokenize_function, batched=True)
  # Apply LoRA for efficient fine-tuning
  lora_config = LoraConfig(
      r=8, lora_alpha=32, lora_dropout=0.05, bias="none",
-     target_modules=["c_attn", "c_proj"]  # Apply LoRA to attention layers
+     target_modules=["c_attn", "c_proj"]
  )

  model = get_peft_model(model, lora_config)
- model.gradient_checkpointing_enable()  # Enable checkpointing for memory efficiency
+ model.gradient_checkpointing_enable()

  # Training arguments
  training_args = TrainingArguments(
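After get_peft_model and gradient checkpointing, PEFT can report how many weights remain trainable, and the Trainer wiring that presumably follows further down the unchanged file would look roughly like this (a sketch under that assumption, not the committed code):

    # Sketch (assumed): verify that only the LoRA adapter weights are trainable
    model.print_trainable_parameters()

    # Assumed Trainer wiring consuming the tokenized dataset and training_args
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets,
    )
    trainer.train()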
@@ -129,5 +126,6 @@ def generate_response(prompt):
      outputs = model.generate(**inputs, max_length=100)
      return tokenizer.decode(outputs[0], skip_special_tokens=True)

+ # Corrected Gradio Interface
  demo = gr.Interface(fn=generate_response, inputs="text", outputs="text")
  demo.launch()
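Note that this gr.Interface rebinds `demo`, so the gr.ChatInterface created earlier in the file is no longer the object that demo.launch() serves. One way to keep both UIs reachable is gr.TabbedInterface; the names below are illustrative, not part of the commit:

    # Sketch (assumed): serve both the chat UI and the generation UI from one Space
    chat_ui = gr.ChatInterface(respond, chatbot=gr.Chatbot(type="messages"))
    gen_ui = gr.Interface(fn=generate_response, inputs="text", outputs="text")
    demo = gr.TabbedInterface([chat_ui, gen_ui], tab_names=["Chat", "Generate"])

    if __name__ == "__main__":
        demo.launch()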
 