import logging
from typing import List, Optional
def intent_detection(
    text: str,
    model: str,
    candidate_intents: Optional[List[str]] = None,
    custom_instructions: str = "",
    use_llm: bool = True
) -> str:
    """Detect the intent of *text*, dispatching to an LLM or a traditional model.

    Args:
        text: Input text to classify. Blank/whitespace-only input short-circuits
            with a user-facing prompt message.
        model: Model identifier forwarded to the selected backend.
        candidate_intents: Optional closed list of intent labels to choose from.
        custom_instructions: Extra instructions forwarded to the LLM backend.
        use_llm: When True (default) use the LLM backend; otherwise the
            traditional-model backend.

    Returns:
        The detected intent name, or a user-facing message string.
    """
    # Guard clause: reject empty or whitespace-only input up front.
    if not text or not text.strip():
        return "Please enter input text."
    return (
        _intent_detection_with_llm(text, model, candidate_intents, custom_instructions)
        if use_llm
        else _intent_detection_with_traditional(text, model, candidate_intents)
    )
from llms import LLM
def _intent_detection_with_llm(
text: str,
model: str,
candidate_intents: Optional[List[str]],
custom_instructions: str
) -> str:
try:
llm = LLM(model=model)
if candidate_intents:
prompt = (
f"Classify the intent of the following text from this list: {', '.join(candidate_intents)}.\n"
f"Return ONLY the best intent name.\n"
+ (f"{custom_instructions}\n" if custom_instructions else "")
+ f"Text: {text}\nIntent:"
)
else:
prompt = (
f"Detect the intent of the following text.\n"
f"Return ONLY the intent name, do not explain.\n"
+ (f"{custom_instructions}\n" if custom_instructions else "")
+ f"Text: {text}\nIntent:"
)
result = llm.generate(prompt)
return result.strip()
except Exception as e:
print(f"Error in LLM intent detection: {str(e)}")
return "Oops! Something went wrong. Please try again later."
def _intent_detection_with_traditional(
text: str,
model: str,
candidate_intents: Optional[List[str]]
) -> str:
# TODO: Implement traditional model inference
return "[Traditional model intent detection not implemented yet]"
|