# src/llms/openai_llm.py
from typing import List, Optional

import openai
from openai import OpenAI  # v1.x client interface

from .base_llm import BaseLLM
class OpenAILanguageModel(BaseLLM):
    """Language model backed by the OpenAI chat-completions API (v1.x client)."""

    def __init__(self, api_key: str, model: str = "gpt-3.5-turbo"):
        """
        Initialize OpenAI Language Model.

        Args:
            api_key (str): OpenAI API key
            model (str): Name of the OpenAI model to use
        """
        self.client = OpenAI(api_key=api_key)  # v1.x client (not the legacy module-level API)
        self.model = model

    def generate(
        self,
        prompt: str,
        max_tokens: Optional[int] = 150,
        temperature: float = 0.7,
        **kwargs
    ) -> str:
        """
        Generate a response using the OpenAI chat-completions API.

        Args:
            prompt (str): Input prompt, sent as a single user message
            max_tokens (Optional[int]): Maximum tokens to generate
            temperature (float): Sampling temperature
            **kwargs: Additional keyword arguments forwarded verbatim to
                ``chat.completions.create`` (e.g. ``top_p``, ``stop``)

        Returns:
            str: Generated response text, stripped of surrounding whitespace;
                empty string if the API returned no text content
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            **kwargs
        )
        # message.content is Optional in the v1 API (None e.g. for tool-call
        # responses) — guard before calling .strip() to avoid AttributeError.
        content = response.choices[0].message.content
        return content.strip() if content else ""

    def tokenize(self, text: str) -> List[str]:
        """
        Split text into whitespace-delimited tokens.

        NOTE: this is a rough approximation, NOT the model's real BPE
        tokenizer — use ``tiktoken`` when exact token boundaries matter.

        Args:
            text (str): Input text to tokenize

        Returns:
            List[str]: Whitespace-delimited tokens
        """
        return text.split()

    def count_tokens(self, text: str) -> int:
        """
        Count approximate tokens in the text (whitespace-based; see tokenize).

        Args:
            text (str): Input text to count tokens for

        Returns:
            int: Approximate number of tokens
        """
        return len(self.tokenize(text))