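"""Gradio + FastAPI wrapper that serves a GGUF build of Qwen 2.5 14B Instruct
through llama.cpp, exposing both a web UI and an OpenAI-compatible
/v1/chat/completions endpoint with chain-of-thought prompting."""
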
import gradio as gr
from langchain_community.llms import LlamaCpp
import torch
import logging
from typing import Optional, List, Dict, Any
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import uvicorn
import time
from threading import Lock
from pathlib import Path
from huggingface_hub import hf_hub_download, list_repo_files
from contextlib import asynccontextmanager

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[Dict[str, str]]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = 2048
    stream: Optional[bool] = False
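    # Note: `stream` is accepted for API compatibility but streaming is not
    # implemented; responses are always returned in full.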

def get_model_filename():
    """Get the correct model filename from the repository."""
    try:
        logger.info("Listing repository files...")
        files = list_repo_files("G17c21ds/Qwen2.5-14B-Instruct-Uncensored-Q8_0-GGUF")
        gguf_files = [f for f in files if f.endswith('.gguf')]
        if not gguf_files:
            raise ValueError("No GGUF model files found in repository")
        logger.info(f"Found model files: {gguf_files}")
        return gguf_files[0]
    except Exception as e:
        logger.error(f"Error listing repository files: {str(e)}")
        raise

def download_model_from_hf():
    """Download the model file from Hugging Face."""
    try:
        logger.info("Downloading model from Hugging Face Hub...")
        model_dir = Path("models")
        model_dir.mkdir(exist_ok=True)
        
        model_filename = get_model_filename()
        logger.info(f"Using model file: {model_filename}")
        
        local_path = hf_hub_download(
            repo_id="G17c21ds/Qwen2.5-14B-Instruct-Uncensored-Q8_0-GGUF",
            filename=model_filename,
            local_dir=model_dir,
            local_dir_use_symlinks=False
        )
        return Path(local_path)
    except Exception as e:
        logger.error(f"Error downloading model: {str(e)}")
        raise

class QwenModel:
    def __init__(self):
        """Initialize the Qwen model with automatic device detection."""
        try:
            self.has_gpu = torch.cuda.is_available()
            self.device_count = torch.cuda.device_count() if self.has_gpu else 0
            logger.info(f"GPU available: {self.has_gpu}, Device count: {self.device_count}")

            model_path = download_model_from_hf()
            logger.info(f"Model path: {model_path}")

            # Offload up to 40 transformer layers to the GPU when one is available;
            # otherwise run entirely on CPU.
            n_gpu_layers = 40 if self.has_gpu else 0
            logger.info(f"Using {'GPU' if self.has_gpu else 'CPU'} for inference")

            # Larger batch size and context window on GPU, smaller on CPU.
            n_batch = 512 if self.has_gpu else 64
            n_ctx = 4096 if self.has_gpu else 2048

            # Load the GGUF model through LangChain's llama.cpp wrapper.
            self.llm = LlamaCpp(
                model_path=str(model_path),
                n_gpu_layers=n_gpu_layers,
                n_ctx=n_ctx,
                n_batch=n_batch,
                verbose=True,
                temperature=0.7,
                max_tokens=2048,
                top_p=0.95,
                top_k=50,
                f16_kv=self.has_gpu,
                use_mlock=True,
                use_mmap=True,
                seed=42,
                repeat_penalty=1.1,
            )
            
            # Serialize generation calls; the llama.cpp context is not thread-safe.
            self.lock = Lock()
            
        except Exception as e:
            logger.error(f"Failed to initialize model: {str(e)}")
            raise

    def generate_cot_prompt(self, messages: List[Dict[str, str]]) -> str:
        """Generate a chain-of-thought prompt from message history."""
        conversation = []
        for msg in messages:
            role = msg.get("role", "")
            content = msg.get("content", "")
            
            if role == "system":
                conversation.append(f"System: {content}")
            elif role == "user":
                conversation.append(f"Human: {content}")
            elif role == "assistant":
                conversation.append(f"Assistant: {content}")

        last_user_msg = next((msg["content"] for msg in reversed(messages) 
                            if msg["role"] == "user"), None)
        
        if not last_user_msg:
            raise ValueError("No user message found in the conversation")

        history = "\n".join(conversation)
        cot_template = f"""Previous conversation:
{history}

Let's approach the latest question step-by-step:

1. Understanding the question:
   {last_user_msg}

2. Breaking down components:
   - Key elements to consider
   - Specific information requested
   - Relevant constraints

3. Reasoning process:
   - Systematic approach
   - Applicable knowledge
   - Potential challenges

4. Step-by-step solution:

"""
        return cot_template

    def process_response(self, response: str) -> str:
        """Process and format the model's response."""
        try:
            response = response.strip()
            if not response.startswith("Step"):
                response = "Step-by-step solution:\n" + response
            return response
        except Exception as e:
            logger.error(f"Error processing response: {str(e)}")
            return "Error processing response"

    def generate_response(self, 
                         messages: List[Dict[str, str]], 
                         temperature: float = 0.7, 
                         max_tokens: int = 2048) -> Dict[str, Any]:
        """Generate a response using chain-of-thought reasoning."""
        try:
            with self.lock:
                full_prompt = self.generate_cot_prompt(messages)
                
                start_time = time.time()
                # invoke() is the current LangChain entry point; the extra
                # kwargs override the sampling settings for this call only.
                response = self.llm.invoke(
                    full_prompt,
                    temperature=temperature,
                    max_tokens=max_tokens
                )
                end_time = time.time()
                
                processed_response = self.process_response(response)
                
                return {
                    "id": f"chatcmpl-{int(time.time()*1000)}",
                    "object": "chat.completion",
                    "created": int(time.time()),
                    "model": "qwen-2.5-14b",
                    "choices": [{
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": processed_response
                        },
                        "finish_reason": "stop"
                    }],
                    "usage": {
                        "prompt_tokens": len(full_prompt.split()),
                        "completion_tokens": len(processed_response.split()),
                        "total_tokens": len(full_prompt.split()) + len(processed_response.split())
                    },
                    "system_info": {
                        "device": "gpu" if self.has_gpu else "cpu",
                        "processing_time": round(end_time - start_time, 2)
                    }
                }
        except Exception as e:
            logger.error(f"Error generating response: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

def create_gradio_interface(model: QwenModel):
    """Create and configure the Gradio interface."""
    
    def predict(message: str, 
                temperature: float, 
                max_tokens: int) -> str:
        messages = [{"role": "user", "content": message}]
        response = model.generate_response(
            messages, 
            temperature=temperature, 
            max_tokens=int(max_tokens)  # Gradio sliders return float values
        )
        return response["choices"][0]["message"]["content"]

    iface = gr.Interface(
        fn=predict,
        inputs=[
            gr.Textbox(
                label="Input",
                placeholder="Enter your question or task here...",
                lines=5
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                label="Temperature",
                info="Higher values make the output more random"
            ),
            gr.Slider(
                minimum=64,
                maximum=4096,
                value=2048,
                step=64,
                label="Max Tokens",
                info="Maximum length of the generated response"
            )
        ],
        outputs=gr.Textbox(label="Response", lines=10),
        title="Qwen 2.5 14B Instruct Model",
        description="""This is a Qwen 2.5 14B model interface with chain-of-thought prompting.
        The model will break down complex problems and solve them step by step.""",
        examples=[
            ["Explain how photosynthesis works", 0.7, 2048],
            ["Solve the quadratic equation: x² + 5x + 6 = 0", 0.7, 1024],
            ["What are the implications of Moore's Law for future computing?", 0.8, 2048]
        ]
    )
    return iface

# Global model instance, initialized in the FastAPI lifespan handler (or in main())
model = None

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager for FastAPI startup and shutdown events."""
    global model
    try:
        # Reuse the model if main() already created it; the 14B weights should
        # only be loaded once.
        if model is None:
            model = QwenModel()
        logger.info("Model initialized successfully")
        yield
    finally:
        pass

# Initialize FastAPI with the lifespan handler
app = FastAPI(title="Qwen 2.5 API", lifespan=lifespan)

@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
    """OpenAI-compatible chat completions endpoint."""
    try:
        response = model.generate_response(
            request.messages,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )
        return JSONResponse(content=response)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
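
# Example request against the OpenAI-compatible endpoint once the server is
# running (port as configured in main() below; payload values are illustrative):
#
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "qwen-2.5-14b",
#          "messages": [{"role": "user", "content": "Explain how photosynthesis works"}],
#          "temperature": 0.7, "max_tokens": 512}'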

def main():
    """Main function to initialize and launch the application."""
    try:
        global model
        if model is None:
            model = QwenModel()
        
        interface = create_gradio_interface(model)
        # Mount the Gradio UI onto the FastAPI app using Gradio's helper.
        gr.mount_gradio_app(app, interface, path="/")
        
        uvicorn.run(
            app,
            host="0.0.0.0",
            port=7860,
            log_level="info"
        )
    except Exception as e:
        logger.error(f"Application failed to start: {str(e)}")
        raise

if __name__ == "__main__":
    main()
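
# To run locally, execute this file directly (e.g. `python <this file>`); the
# Gradio UI and the OpenAI-compatible API are both served on port 7860.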