File size: 6,463 Bytes
c211499
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import * as Core from '@anthropic-ai/sdk/core';
import { APIPromise } from '@anthropic-ai/sdk/core';
import { APIResource } from '@anthropic-ai/sdk/resource';
import * as CompletionsAPI from '@anthropic-ai/sdk/resources/completions';
import { Stream } from '@anthropic-ai/sdk/streaming';
export declare class Completions extends APIResource {
    /**
     * Create a completion.
     *
     * The return type is resolved from the `stream` flag on the params via the
     * overloads below: non-streaming params yield a single `Completion`, while
     * streaming params yield a `Stream<Completion>` of server-sent events. The
     * final overload covers the case where `stream` is only known at runtime.
     */
    create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;
    create(body: CompletionCreateParamsStreaming, options?: Core.RequestOptions): APIPromise<Stream<Completion>>;
    create(body: CompletionCreateParamsBase, options?: Core.RequestOptions): APIPromise<Stream<Completion> | Completion>;
}
/**
 * A completion response returned by `completions.create`.
 */
export interface Completion {
    /**
     * The resulting completion up to and excluding the stop sequences.
     */
    completion: string;
    /**
     * The model that performed the completion.
     */
    model: string;
    /**
     * The reason that we stopped sampling.
     *
     * This may be one of the following values:
     *
     * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
     *   `stop_sequences` parameter, or a stop sequence built into the model
     * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
     */
    stop_reason: string;
}
/**
 * Request params for `completions.create`: either the non-streaming or the
 * streaming variant, discriminated by the `stream` property.
 */
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
/**
 * Properties shared by both the streaming and non-streaming request params.
 */
export interface CompletionCreateParamsBase {
    /**
     * The maximum number of tokens to generate before stopping.
     *
     * Note that our models may stop _before_ reaching this maximum. This parameter
     * only specifies the absolute maximum number of tokens to generate.
     */
    max_tokens_to_sample: number;
    /**
     * The model that will complete your prompt.
     *
     * As we improve Claude, we develop new versions of it that you can query. This
     * parameter controls which version of Claude answers your request. Right now we
     * are offering two model families: Claude, and Claude Instant. You can use them by
     * setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
     * [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
     * additional details.
     */
    model: (string & {}) | 'claude-2' | 'claude-instant-1';
    /**
     * The prompt that you want Claude to complete.
     *
     * For proper response generation you will need to format your prompt as follows:
     *
     * ```javascript
     * const userQuestion = "Why is the sky blue?";
     * const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
     * ```
     *
     * See our
     * [comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
     * for more context.
     */
    prompt: string;
    /**
     * An object describing metadata about the request.
     */
    metadata?: CompletionCreateParams.Metadata;
    /**
     * Sequences that will cause the model to stop generating completion text.
     *
     * Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
     * sequences in the future. By providing the stop_sequences parameter, you may
     * include additional strings that will cause the model to stop generating.
     */
    stop_sequences?: Array<string>;
    /**
     * Whether to incrementally stream the response using server-sent events.
     *
     * See
     * [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
     * for details.
     */
    stream?: boolean;
    /**
     * Amount of randomness injected into the response.
     *
     * Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
     * multiple choice, and closer to 1 for creative and generative tasks.
     */
    temperature?: number;
    /**
     * Only sample from the top K options for each subsequent token.
     *
     * Used to remove "long tail" low probability responses.
     * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
     */
    top_k?: number;
    /**
     * Use nucleus sampling.
     *
     * In nucleus sampling, we compute the cumulative distribution over all the options
     * for each subsequent token in decreasing probability order and cut it off once it
     * reaches a particular probability specified by `top_p`. You should either alter
     * `temperature` or `top_p`, but not both.
     */
    top_p?: number;
}
export declare namespace CompletionCreateParams {
    /**
     * An object describing metadata about the request.
     */
    interface Metadata {
        /**
         * An external identifier for the user who is associated with the request.
         *
         * This should be a uuid, hash value, or other opaque identifier. Anthropic may use
         * this id to help detect abuse. Do not include any identifying information such as
         * name, email address, or phone number.
         */
        user_id?: string;
    }
    // NOTE(review): these aliases mirror the top-level types of the same name —
    // presumably kept so consumers can reference them via the namespace; confirm
    // against callers before removing.
    type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;
    type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;
}
/**
 * Request params for a non-streaming completion: `stream` is absent or `false`,
 * so `create` resolves to a single `Completion`.
 */
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {
    /**
     * Whether to incrementally stream the response using server-sent events.
     *
     * See
     * [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
     * for details.
     */
    stream?: false;
}
/**
 * Request params for a streaming completion: `stream` must be `true`, so
 * `create` resolves to a `Stream<Completion>`.
 */
export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {
    /**
     * Whether to incrementally stream the response using server-sent events.
     *
     * See
     * [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
     * for details.
     */
    stream: true;
}
// Merges with the `Completions` class so its request/response types can be
// reached as `Completions.Completion`, `Completions.CompletionCreateParams`, etc.
export declare namespace Completions {
    export import Completion = CompletionsAPI.Completion;
    export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams;
    export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;
    export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;
}
//# sourceMappingURL=completions.d.ts.map