chatbot-backend / src / llms / base_llm.py
TalatMasood's picture
initial commit
640b1c8
raw
history blame
1.24 kB
# src/llms/base_llm.py
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
class BaseLLM(ABC):
    """Abstract interface that every concrete LLM backend must implement.

    Subclasses provide text generation plus tokenization utilities so the
    rest of the application can swap model providers transparently.
    """

    @abstractmethod
    def generate(
        self,
        prompt: str,
        max_tokens: Optional[int] = None,
        temperature: float = 0.7,
        **kwargs
    ) -> str:
        """Produce a model response for *prompt*.

        Args:
            prompt (str): The input text sent to the model.
            max_tokens (Optional[int]): Upper bound on generated tokens;
                ``None`` leaves the limit to the backend.
            temperature (float): Sampling temperature controlling randomness.
            **kwargs: Backend-specific generation options.

        Returns:
            str: The generated text.
        """
        ...

    @abstractmethod
    def tokenize(self, text: str) -> List[str]:
        """Split *text* into the backend's tokens.

        Args:
            text (str): Text to tokenize.

        Returns:
            List[str]: The token strings, in order.
        """
        ...

    @abstractmethod
    def count_tokens(self, text: str) -> int:
        """Return how many tokens *text* occupies for this backend.

        Args:
            text (str): Text whose token count is needed.

        Returns:
            int: The token count.
        """
        ...