<script lang="ts">
  import Prompt from "$lib/components/Prompt.svelte";
  import SelectModel from "$lib/components/SelectModel.svelte";
  import Input from "$lib/components/Input.svelte";
  import Toggle from "$lib/components/Toggle.svelte";
  import { TEXT_GENERATIONS } from "$lib/utils/models";

  export let form: Record<string, any>;
  export let onForm: (form: Record<string, any>) => void;
</script>
<SelectModel value={form.model} items={TEXT_GENERATIONS} onChange={(model) => onForm({ ...form, model })} />
<Prompt
  value={form.inputs}
  onChange={(inputs) => onForm({ ...form, inputs })}
/>

<div class="flex items-center justify-start gap-4 mt-3">
  <p class="text-slate-500 uppercase font-medium text-sm">
    Optional parameters
  </p>
  <div class="w-full flex-1 h-[1px] bg-slate-600" />
</div>

<div class="grid grid-cols-2 gap-x-10 gap-y-6">
  <div class="flex flex-col gap-3">
    <Toggle
      label="Do Sample"
      tooltip="If set to False (the default), the generation method will be greedy search, which selects the most probable continuation sequence after the prompt you provide. Greedy search is deterministic, so the same results will always be returned from the same input. When do_sample is True, tokens will be sampled from a probability distribution and will therefore vary across invocations."
      checked={form?.parameters?.do_sample}
      onChange={(do_sample) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, do_sample },
        })
      }
    />
    <Input
      label="Max new tokens"
      type="number"
      tooltip="The maximum number of new tokens the model is allowed to generate on top of the prompt."
      min={1}
      max={244}
      sanitize={(value) => {
        const valueAsNumber = Number(value);
        if (valueAsNumber < 1) return 1;
        if (valueAsNumber > 244) return 244;
        return valueAsNumber;
      }}
      value={form?.parameters?.max_new_tokens}
      onChange={(max_new_tokens) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, max_new_tokens },
        })
      }
    />
    <Input
      label="Temperature"
      type="number"
      tooltip="Controls the amount of variation we desire from the generation. A temperature of 0 is equivalent to greedy search. If we set a value for temperature, then do_sample will automatically be enabled. The same thing happens for top_k and top_p. When doing code-related tasks, we want less variability and hence recommend a low temperature. For other tasks, such as open-ended text generation, we recommend a higher one."
      min={0}
      max={1}
      sanitize={(value) => {
        const valueAsNumber = Number(value);
        if (valueAsNumber > 1) return 1;
        if (valueAsNumber < 0) return 0;
        return valueAsNumber;
      }}
      value={form?.parameters?.temperature}
      onChange={(temperature) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, temperature },
        })
      }
    />
  </div>
  <div class="flex flex-col gap-3">
    <Toggle
      label="Return full text"
      tooltip="Whether to include the input sequence in the output returned by the endpoint. The default used by InferenceClient is False, but the endpoint itself uses True by default."
      checked={form?.parameters?.return_full_text}
      onChange={(return_full_text) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, return_full_text },
        })
      }
    />
    <Input
      label="Top K"
      type="number"
      tooltip={`Enables "Top-K" sampling: the model will choose from the K most probable tokens that may occur after the input sequence. Typical values are between 10 and 50.`}
      min={10}
      max={50}
      sanitize={(value) => {
        const valueAsNumber = Number(value);
        if (valueAsNumber < 10) return 10;
        if (valueAsNumber > 50) return 50;
        return valueAsNumber;
      }}
      value={form?.parameters?.top_k}
      onChange={(top_k) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, top_k },
        })
      }
    />
    <Input
      label="Stop sequences"
      tooltip="A list of sequences that will cause generation to stop when encountered in the output."
      subLabel="Separate each sequence with a comma"
      value={form?.parameters?.stop_sequences}
      onChange={(stop_sequences) =>
        onForm({
          ...form,
          parameters: { ...form.parameters, stop_sequences },
        })
      }
    />
  </div>
</div>
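
The component above only manages form state; the request itself happens elsewhere. To make the data flow concrete, here is a minimal sketch of a helper that posts that state to the serverless Inference API. It is not part of the source: the generate name, the token argument, and the assumption that the backend accepts the form's parameter keys verbatim are all illustrative. The comma-splitting of stop_sequences follows the "Separate each sequence with a comma" hint on the input above.

// Hypothetical helper: turns the `form` state from the component into a
// text-generation request. Endpoint shape follows the public serverless
// Inference API; parameter keys are forwarded as-is (an assumption).
async function generate(form: Record<string, any>, token: string) {
  const { stop_sequences, ...parameters } = form.parameters ?? {};
  const res = await fetch(
    `https://api-inference.huggingface.co/models/${form.model}`,
    {
      method: "POST",
      headers: {
        Authorization: `Bearer ${token}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        inputs: form.inputs,
        parameters: {
          ...parameters,
          // the UI collects stop sequences as one comma-separated string,
          // so split it into the list the tooltip describes
          ...(stop_sequences
            ? {
                stop_sequences: String(stop_sequences)
                  .split(",")
                  .map((s) => s.trim()),
              }
            : {}),
        },
      }),
    },
  );
  return res.json();
}

A parent page could wire this to the component by keeping form in a store, passing onForm={(f) => (form = f)}, and calling generate(form, token) on submit.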