Delete qwen.py
qwen.py
DELETED
@@ -1,95 +0,0 @@
-import logging
-from typing import List, Dict, Optional, Tuple
-
-import torch
-# from transformers import pipeline
-from huggingface_hub import InferenceClient
-
-from config.config import token, SYSTEM_PROMPT
-from services.whisper import generate_speech, transcribe
-from services.search import WebSearcher
-
-logger = logging.getLogger(__name__)
-
-searcher = WebSearcher()
-
-# Qwen Configuration
-model_kwargs = {
-    "low_cpu_mem_usage": True,
-    "torch_dtype": torch.float32,
-    'use_cache': True
-}
-client = InferenceClient(
-    model="Qwen/Qwen2.5-0.5B-Instruct",
-    token=token
-    # trust_remote_code=True,
-    # device=device,
-    # model_kwargs=model_kwargs
-)
-
-async def respond(
-    audio: Optional[str] = None,
-    text: Optional[str] = None,
-    do_search: bool = False,
-    history: List[Dict] = None
-) -> Tuple[Optional[str], str]:
-    try:
-        if text:
-            user_text = text.strip()
-        elif audio:
-            user_text = await transcribe(audio)
-        else:
-            return None, "No input provided"
-
-        # Build conversation context
-        messages = []
-        messages.append({"role": "system", "content": SYSTEM_PROMPT})
-
-        if history:
-            messages.extend(history)
-
-        # Format message history for Qwen
-        prompt = ""
-        for msg in messages:
-            role = msg["role"]
-            content = msg["content"]
-            prompt += f"<|im_start|>{role}\n{content}<|im_end|>\n"
-
-        # Add current user message
-        prompt += f"<|im_start|>user\n{user_text}<|im_end|>\n<|im_start|>assistant\n"
-
-        # Add web-search context if enabled
-        if do_search:
-            results = searcher.search(user_text)
-            if results:
-                search_context = "Based on search results:\n"
-                for result in results:
-                    snippet = result['content'][:5000].strip()
-                    search_context += f"{snippet}\n"
-                prompt = prompt.replace(SYSTEM_PROMPT, f"{SYSTEM_PROMPT}\n{search_context}")
-
-        # Generate response
-        reply = client.text_generation(
-            prompt,
-            max_new_tokens=300,
-            do_sample=True,
-            temperature=0.7,
-            top_p=0.9,
-            return_full_text=False
-        )
-
-        # Extract and clean assistant response
-        assistant_response = reply  # Reply is already the generated text string
-        if "<|im_start|>assistant\n" in assistant_response:
-            assistant_response = assistant_response.split("<|im_start|>assistant\n")[-1]
-        if "<|im_end|>" in assistant_response:
-            assistant_response = assistant_response.split("<|im_end|>")[0]
-        assistant_response = assistant_response.strip()
-
-        # Convert response to speech
-        audio_path = await generate_speech(assistant_response)
-        return audio_path, assistant_response
-
-    except Exception as e:
-        logger.error(f"Error in respond: {str(e)}")
-        return None, "Sorry, I encountered an error"