nafisneehal committed
Commit 5387ea1 · verified · 1 Parent(s): ba203a5

Update app.py

Files changed (1)
  1. app.py +79 -60
app.py CHANGED
@@ -1,64 +1,83 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
 )


-if __name__ == "__main__":
-    demo.launch()
 import gradio as gr
+import os
+import torch
+from unsloth import FastLanguageModel
+import spaces  # Hugging Face Spaces package that provides the @spaces.GPU decorator
+
+# Get Hugging Face token from environment variables
+HF_TOKEN = os.environ.get('HF_TOKEN')
+
+# Check if we're running in a Hugging Face Space with GPU constraints
+IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
+IS_SPACE = os.environ.get("SPACE_ID", None) is not None
+
+# Determine device (use GPU if available)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
+
+print(f"Using device: {device}")
+print(f"Low memory mode: {LOW_MEMORY}")
+
+# Model configuration
+max_seq_length = 2048  # Max sequence length for RoPE scaling
+dtype = torch.float16 if device == "cuda" else torch.float32
+load_in_4bit = True  # Enable 4-bit quantization if memory is limited
+
+# Load model and tokenizer with device mapping
+model_name = "nafisneehal/chandler_bot"
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name=model_name,
+    max_seq_length=max_seq_length,
+    dtype=dtype,
+    load_in_4bit=load_in_4bit,
+    device_map="auto" if device == "cuda" else None  # Automatic GPU mapping
 )
+FastLanguageModel.for_inference(model)  # Optimize model for faster inference
+
+# Define prompt structure (update if necessary for your model)
+alpaca_prompt = "{instruction} {input} {output}"
+
+instruction_text = "Learn how to talk like Chandler - a popular character from the FRIENDS TV show. Input is someone saying something; output is what Chandler says in response."
+
+
+@spaces.GPU  # Use GPU provided by Hugging Face Spaces if available
+def generate_response(user_input, chat_history):
+    instruction = user_input  # Treats user input as the instruction
+    input_text = ""  # Any additional input if needed; empty otherwise
+
+    # Prepare inputs for model inference on the correct device
+    inputs = tokenizer(
+        [alpaca_prompt.format(instruction=instruction, input=input_text, output="")],
+        return_tensors="pt"
+    ).to(device)  # Ensure tensors are on the correct device
+
+    # Generate response on GPU or CPU as appropriate
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_new_tokens=100)
+
+    # Decode only the newly generated tokens so the prompt is not echoed back
+    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+    bot_reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
+
+    # Update chat history with a (user message, bot message) pair for gr.Chatbot
+    chat_history.append((user_input, bot_reply))
+
+    return chat_history, ""  # Returns updated chat history and clears input
+
+
+# Set up Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Llama-Based Chatbot on GPU")
+
+    chat_history = gr.Chatbot(label="Chat History")
+    user_input = gr.Textbox(
+        placeholder="Type your message here...", label="Your Message")

+    # Connect submit actions to the generate_response function
+    user_input.submit(generate_response, [user_input, chat_history],
+                      [chat_history, user_input])
+    submit_btn = gr.Button("Send")
+    submit_btn.click(generate_response, [user_input, chat_history],
+                     [chat_history, user_input])

+demo.launch()
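
The committed generate_response returns the whole reply in one step. If token-by-token streaming were wanted later, a minimal sketch along these lines could replace it; this assumes the model, tokenizer, device, and alpaca_prompt objects defined in app.py above, and the name generate_response_stream is illustrative, not part of this commit:

from threading import Thread
from transformers import TextIteratorStreamer


def generate_response_stream(user_input, chat_history):
    # Hypothetical streaming variant of generate_response (not in this commit)
    inputs = tokenizer(
        [alpaca_prompt.format(instruction=user_input, input="", output="")],
        return_tensors="pt",
    ).to(device)

    # skip_prompt=True yields only newly generated text, not the echoed prompt
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True)

    # Run generation in a background thread so the streamer can be consumed here
    thread = Thread(target=model.generate,
                    kwargs=dict(**inputs, streamer=streamer, max_new_tokens=100))
    thread.start()

    chat_history = chat_history + [(user_input, "")]
    partial = ""
    for token_text in streamer:
        partial += token_text
        chat_history[-1] = (user_input, partial)
        yield chat_history, ""  # Gradio re-renders the Chatbot on each yield

Wiring it up would only change the event handlers, e.g. user_input.submit(generate_response_stream, [user_input, chat_history], [chat_history, user_input]).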