# src/llms/ollama_llm.py
import requests
from typing import Optional, List

from .base_llm import BaseLLM

class OllamaLanguageModel(BaseLLM):
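    """BaseLLM implementation backed by a locally running Ollama server."""
    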
    def __init__(
        self, 
        base_url: str = 'http://localhost:11434', 
        model: str = 'llama2'
    ):
        """
        Initialize Ollama Language Model
        
        Args:
            base_url (str): Base URL for Ollama API
            model (str): Name of the Ollama model to use
        """
        self.base_url = base_url
        self.model = model
    
    def generate(
        self, 
        prompt: str, 
        max_tokens: Optional[int] = 150,
        temperature: float = 0.7,
        **kwargs
    ) -> str:
        """
        Generate response using Ollama API
        
        Args:
            prompt (str): Input prompt
            max_tokens (Optional[int]): Maximum tokens to generate
            temperature (float): Sampling temperature
            **kwargs: Extra Ollama sampling options (e.g. top_p, top_k)
        
        Returns:
            str: Generated response
        """
        options = {"temperature": temperature, **kwargs}
        # Only send num_predict when a limit was actually requested.
        if max_tokens is not None:
            options["num_predict"] = max_tokens
        
        response = requests.post(
            f"{self.base_url}/api/generate",
            json={
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": options,
            },
            timeout=120,  # fail fast instead of hanging on an unresponsive server
        )
        
        response.raise_for_status()
        # With stream=False the API returns a single JSON object whose
        # generated text lives under the "response" key.
        return response.json().get('response', '').strip()
    
    def tokenize(self, text: str) -> List[str]:
        """
        Tokenize text (approximate)
        
        Args:
            text (str): Input text to tokenize
        
        Returns:
            List[str]: List of tokens
        """
        # Naive whitespace tokenization: counts will differ from the model's
        # actual tokenizer, so treat the result as a rough approximation.
        return text.split()
    
    def count_tokens(self, text: str) -> int:
        """
        Count tokens in the text
        
        Args:
            text (str): Input text to count tokens in
        
        Returns:
            int: Number of tokens
        """
        return len(self.tokenize(text))
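

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes an
# Ollama server is running locally with the configured model already pulled,
# e.g. via `ollama serve` plus `ollama pull llama2`. Because this module uses
# a relative import, run it as a module from the project root:
#   python -m src.llms.ollama_llm
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    llm = OllamaLanguageModel(model="llama2")
    
    prompt = "Explain what an abstract base class is in one sentence."
    print(llm.generate(prompt, max_tokens=100, temperature=0.2))
    
    # Token counts come from the whitespace tokenizer above, so they are
    # only a rough approximation of the model's real token usage.
    print(f"Prompt length: ~{llm.count_tokens(prompt)} tokens")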