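# Gradio chat UI for the fine-tuned Unsloth LoRA model "ivwhy/lora_model".
# The model is loaded once at startup; each turn is formatted with the
# tokenizer's chat template and generated through a transformers pipeline.
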
from unsloth import FastLanguageModel  # import unsloth first so it can patch transformers
import torch
import transformers
import gradio as gr

# Load the fine-tuned Unsloth model
max_seq_length = 2048  # Adjust based on your training
dtype = None  # None for auto detection

def load_model():
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="ivwhy/lora_model",  # Your fine-tuned model path
        max_seq_length=max_seq_length,
        dtype=dtype,
        load_in_4bit=True  # Optional: load in 4-bit for efficiency
    )

    # Put the Unsloth model into inference mode for faster generation
    FastLanguageModel.for_inference(model)

    # Ensure a pad token exists; fall back to the EOS token if it doesn't
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Create the pipeline. No `device` argument here: a 4-bit model is already
    # dispatched to the GPU by accelerate, and pipeline() rejects `device` for
    # models loaded that way
    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )

    return pipeline, tokenizer

# Load the model once at import time so every request reuses the same pipeline
generation_pipeline, tokenizer = load_model()

def chat_function(message, history, system_prompt, max_new_tokens, temperature):
    # Rebuild the whole conversation so the model sees earlier turns, not just
    # the latest message; in Gradio's default tuples format, history arrives
    # as a list of [user, assistant] pairs
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    
    # Apply chat template
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
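    # `prompt` is now one formatted string. With a Llama-3 style template it
    # looks roughly like (illustrative only; the exact layout depends on the
    # tokenizer's chat template):
    #   <|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>
    #   <|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|>
    #   <|start_header_id|>assistant<|end_header_id|>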
    
    # Stop on the generic EOS token, plus Llama-3's end-of-turn token when the
    # tokenizer defines it (convert_tokens_to_ids returns None or the unk id
    # for tokens missing from the vocabulary)
    terminators = [tokenizer.eos_token_id]
    eot_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
    if isinstance(eot_id, int) and eot_id != tokenizer.unk_token_id:
        terminators.append(eot_id)
    
    # Generate a response; cast max_new_tokens since sliders can return floats
    outputs = generation_pipeline(
        prompt,
        max_new_tokens=int(max_new_tokens),
        eos_token_id=terminators,
        do_sample=True,
        temperature=temperature,
        top_p=0.9,
    )
    
    # Extract and return just the generated text
    return outputs[0]["generated_text"][len(prompt):]

# Create Gradio interface
demo = gr.ChatInterface(
    chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    additional_inputs=[
        gr.Textbox("You are a helpful AI", label="System Prompt"),
        gr.Slider(minimum=1, maximum=4000, value=500, step=1, label="Max New Tokens"),
        # Keep the minimum above 0: temperature=0 with do_sample=True is invalid
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
    ]
)

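# launch() serves on http://127.0.0.1:7860 by default; pass share=True to
# demo.launch() to get a temporary public link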
if __name__ == "__main__":
    demo.launch()