---
# Model configuration for deepseek-r1-distill-qwen-14b (llama.cpp engine).
# Reconstructed into block YAML: the file had been collapsed onto one line,
# which is not parseable (YAML is newline-significant).

# BEGIN GENERAL GGUF METADATA
id: deepseek-r1-distill-qwen-14b
model: deepseek-r1-distill-qwen-14b
name: deepseek-r1-distill-qwen-14b
version: 1
# END GENERAL GGUF METADATA

# BEGIN INFERENCE PARAMETERS
# BEGIN REQUIRED
# Stop sequence: "<｜end▁of▁sentence｜>" written with \u escapes
# (fullwidth vertical bar U+FF5C, lower-one-eighth block U+2581),
# so double quotes are required for the escapes to be interpreted.
stop:
  - "<\uFF5Cend\u2581of\u2581sentence\uFF5C>"
# END REQUIRED

# BEGIN OPTIONAL
stream: true
top_p: 0.9
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 4096
# -1 requests a random sampling seed.
seed: -1
dynatemp_range: 0
dynatemp_exponent: 1
top_k: 40
min_p: 0.05
tfs_z: 1
typ_p: 1
repeat_last_n: 64
repeat_penalty: 1
# NOTE(review): llama.cpp's mirostat parameter is an integer mode (0/1/2);
# `false` presumably coerces to 0 (disabled) — confirm against the consumer.
mirostat: false
mirostat_tau: 5
# Normalized from 0.100000001 — a float32 round-trip artifact of 0.1.
mirostat_eta: 0.1
penalize_nl: false
ignore_eos: false
n_probs: 0
min_keep: 0
# END OPTIONAL
# END INFERENCE PARAMETERS

# BEGIN MODEL LOAD PARAMETERS
# BEGIN REQUIRED
engine: llama-cpp
# DeepSeek-R1 chat template: {system_message}<｜User｜>{prompt}<｜Assistant｜>.
# The original's escaped line-continuation ("...\uFF5C\<newline>>") is folded
# back into a single-line scalar with an identical value.
prompt_template: "{system_message}<\uFF5CUser\uFF5C>{prompt}<\uFF5CAssistant\uFF5C>"
ctx_len: 4096
# Number of layers to offload to the GPU.
ngl: 34
# END REQUIRED
# END MODEL LOAD PARAMETERS