from typing import List, Optional

from llms import LLM


def intent_detection(
    text: str,
    model: str,
    candidate_intents: Optional[List[str]] = None,
    custom_instructions: str = "",
    use_llm: bool = True,
) -> str:
    """Detect the intent of `text`, optionally restricted to `candidate_intents`.

    Dispatches to an LLM-based classifier when `use_llm` is True, otherwise to a
    traditional-model path.
    """
    if not text or not text.strip():
        return "Please enter input text."
    if use_llm:
        return _intent_detection_with_llm(text, model, candidate_intents, custom_instructions)
    return _intent_detection_with_traditional(text, model, candidate_intents)


def _intent_detection_with_llm(
    text: str,
    model: str,
    candidate_intents: Optional[List[str]],
    custom_instructions: str,
) -> str:
    """Classify intent by prompting an LLM, optionally constrained to a candidate list."""
    try:
        llm = LLM(model=model)
        if candidate_intents:
            # Constrain the model to the caller-supplied intent labels.
            prompt = (
                f"Classify the intent of the following text from this list: {', '.join(candidate_intents)}.\n"
                "Return ONLY the best intent name.\n"
                + (f"{custom_instructions}\n" if custom_instructions else "")
                + f"Text: {text}\nIntent:"
            )
        else:
            # Open-ended detection: let the model name the intent itself.
            prompt = (
                "Detect the intent of the following text.\n"
                "Return ONLY the intent name, do not explain.\n"
                + (f"{custom_instructions}\n" if custom_instructions else "")
                + f"Text: {text}\nIntent:"
            )
        result = llm.generate(prompt)
        return result.strip()
    except Exception as e:
        print(f"Error in LLM intent detection: {e}")
        return "Oops! Something went wrong. Please try again later."


def _intent_detection_with_traditional(
    text: str,
    model: str,
    candidate_intents: Optional[List[str]],
) -> str:
    """Placeholder for a non-LLM (traditional classifier) intent-detection path."""
    return "[Traditional model intent detection not implemented yet]"