File size: 1,265 Bytes
74dace5
35cce96
e6f89d5
35cce96
 
 
 
42cdc8f
35cce96
 
 
 
 
42cdc8f
e6f89d5
35cce96
74dace5
35cce96
 
 
 
 
 
42cdc8f
e6f89d5
35cce96
74dace5
35cce96
 
 
 
42cdc8f
74dace5
42cdc8f
35cce96
 
42cdc8f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from anthropic import HUMAN_PROMPT, Anthropic, AsyncAnthropic
from dotenv import load_dotenv
from langsmith.run_helpers import traceable

# Load variables from a local .env file into the process environment at import
# time (presumably the Anthropic API key lives there — confirm against callers).
load_dotenv()


class AnthropicCustom:
    """Thin wrapper around the Anthropic completions API.

    Holds the credentials and request parameters, and exposes a blocking
    single-shot completion call plus an async streaming variant.  Both
    methods are traced via LangSmith's ``@traceable`` decorator.
    """

    # Single timeout (seconds) shared by both clients.  The sync path
    # previously used 5 s, which is too short for a typical LLM completion
    # and was inconsistent with the async path's 60 s.
    REQUEST_TIMEOUT = 60

    def __init__(self, api_key: str, model: str, max_tokens: int = 1000, prompt: str = ""):
        """Store connection and request parameters.

        Args:
            api_key: Anthropic API key.
            model: Model identifier to complete against.
            max_tokens: Upper bound passed as ``max_tokens_to_sample``.
            prompt: Full prompt text sent to the API.
        """
        self.api_key = api_key
        self.model = model
        self.max_tokens = max_tokens
        self.prompt = prompt

    @traceable(run_type="llm", name="Claude", tags=["ai", "anthropic"])
    def get_anthropic_response(self) -> str:
        """Return the full completion text for ``self.prompt`` (blocking).

        Stops at ``HUMAN_PROMPT`` for consistency with the async/streaming
        variant, which already passed that stop sequence.
        """
        sync_client = Anthropic(api_key=self.api_key, timeout=self.REQUEST_TIMEOUT)
        response = sync_client.completions.create(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens,
            stop_sequences=[
                HUMAN_PROMPT,
            ],
        )
        return response.completion

    @traceable(run_type="llm", name="Claude", tags=["ai", "anthropic"])
    async def get_anthropic_response_async(self):
        """Yield completion text chunks for ``self.prompt`` as they stream in."""
        async_client = AsyncAnthropic(api_key=self.api_key, timeout=self.REQUEST_TIMEOUT)
        async for event in await async_client.completions.create(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens,
            stop_sequences=[
                HUMAN_PROMPT,
            ],
            stream=True,
        ):
            yield event.completion