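<!-- Form for the text-generation task: model picker, prompt, and optional generation parameters. -->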
<script lang="ts">
import Prompt from "$lib/components/Prompt.svelte";
import SelectModel from "$lib/components/SelectModel.svelte";
import Input from "$lib/components/Input.svelte";
import Toggle from "$lib/components/Toggle.svelte";
import { TEXT_GENERATIONS } from "$lib/utils/models";
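// `form` holds the whole form state (model, inputs, parameters);
// `onForm` reports every change back to the parent, which owns the state.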
export let form: Record<string, any>;
export let onForm: (form: Record<string, any>) => void;
</script>
<SelectModel value={form.model} items={TEXT_GENERATIONS} onChange={(model) => onForm({ ...form, model })} />
<Prompt
value={form.inputs}
onChange={(inputs) => onForm({ ...form, inputs })}
/>
<div class="flex items-center justify-start gap-4 mt-3">
<p class="text-slate-500 uppercase font-medium text-sm">
Optional parameters
</p>
<div class="w-full flex-1 h-[1px] bg-slate-600" />
</div>
<div class="grid grid-cols-2 gap-x-10 gap-y-6">
<div class="flex flex-col gap-3">
<Toggle
label="Do Sample"
tooltip="If set to False (the default), the generation method will be greedy search, which selects the most probable continuation sequence after the prompt you provide. Greedy search is deterministic, so the same results will always be returned from the same input. When do_sample is True, tokens will be sampled from a probability distribution and will therefore vary across invocations."
checked={form?.parameters?.do_sample}
onChange={(do_sample) =>
onForm({
...form,
parameters: { ...form.parameters, do_sample },
})
}
/>
<Input
label="Max new tokens"
type="number"
tooltip="Whether to include the input sequence in the output returned by the endpoint. The default used by InferenceClient is False, but the endpoint itself uses True by default."
min={1}
max={244}
sanitize={(value) => {
const valueAsNumber = Number(value);
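// Clamp the value into the allowed range before it reaches the form state
// (the other numeric inputs below follow the same pattern).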
if (valueAsNumber < 1) return 1;
if (valueAsNumber > 244) return 244;
return valueAsNumber;
}}
value={form?.parameters?.max_new_tokens}
onChange={(max_new_tokens) =>
onForm({
...form,
parameters: { ...form.parameters, max_new_tokens },
})
}
/>
<Input
label="Temperature"
type="number"
tooltip="Controls the amount of variation we desire from the generation. A temperature of 0 is equivalent to greedy search. If we set a value for temperature, then do_sample will automatically be enabled. The same thing happens for top_k and top_p. When doing code-related tasks, we want less variability and hence recommend a low temperature. For other tasks, such as open-ended text generation, we recommend a higher one."
min={0}
max={1}
sanitize={(value) => {
const valueAsNumber = Number(value);
if (valueAsNumber > 1) return 1;
if (valueAsNumber < 0) return 0;
return valueAsNumber;
}}
value={form?.parameters?.temperature}
onChange={(temperature) =>
onForm({
...form,
parameters: { ...form.parameters, temperature },
})
}
/>
</div>
<div class="flex flex-col gap-3">
<Toggle
label="Return full Text"
tooltip="Whether to include the input sequence in the output returned by the endpoint. The default used by InferenceClient is False, but the endpoint itself uses True by default."
checked={form?.parameters?.return_full_text}
onChange={(return_full_text) =>
onForm({
...form,
parameters: { ...form.parameters, return_full_text },
})
}
/>
<Input
label="Top K"
type="number"
tooltip={`Enables "Top-K" sampling: the model will choose from the K most probable tokens that may occur after the input sequence. Typical values are between 10 and 50.`}
min={10}
max={50}
sanitize={(value) => {
const valueAsNumber = Number(value);
if (valueAsNumber < 10) return 10;
if (valueAsNumber > 50) return 50;
return valueAsNumber;
}}
value={form?.parameters?.top_k}
onChange={(top_k) =>
onForm({
...form,
parameters: { ...form.parameters, top_k },
})
}
/>
<Input
label="Stop sequences"
tooltip="A list of sequences that will cause generation to stop when encountered in the output."
subLabel="Separate each sequence with a comma"
value={form?.parameters?.stop_sequences}
onChange={(stop_sequences) =>
onForm({
...form,
parameters: { ...form.parameters, stop_sequences },
})
}
/>
</div>
</div>
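<!--
  Usage sketch (the component and parent names below are illustrative, not part of this file):

    <TextGenerationForm {form} onForm={(next) => (form = next)} />

  The fields collected here mirror the parameters of the Hugging Face Inference API
  text-generation task, so `form` can be serialized into a request payload along the
  lines of (exact request wiring lives elsewhere in this app):

    {
      "inputs": "Once upon a time",
      "parameters": { "do_sample": true, "temperature": 0.7, "top_k": 40, "max_new_tokens": 100 }
    }
-->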