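# Gradio chat Space: serves meta-llama/Meta-Llama-3-8B-Instruct with a fine-tuned
# PEFT adapter (JerniganLab/interviews-and-qa) behind a ChatInterface UI.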
import gradio as gr
import transformers
import torch
from peft import PeftModel

# Hub repo id of the fine-tuned PEFT adapter loaded on top of the base model below
model_id = "JerniganLab/interviews-and-qa"
pipeline = transformers.pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",
)
# Wrap the pipeline's base model with the fine-tuned adapter
pipeline.model = PeftModel.from_pretrained(pipeline.model, model_id)
def chat_function(message, history, system_prompt, max_new_tokens, temperature, top_p):
    # Build the conversation and render it with the model's chat template
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message},
    ]
    prompt = pipeline.tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    # Llama 3 ends a turn with <|eot_id|> in addition to the regular EOS token
    terminators = [
        pipeline.tokenizer.eos_token_id,
        pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
    outputs = pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Return only the newly generated text, not the echoed prompt
    return outputs[0]["generated_text"][len(prompt):]
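# Quick local smoke test (a sketch, assuming the base model and adapter have already
# been downloaded and fit on the available GPU):
# print(chat_function("Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.9))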
""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
demo = gr.ChatInterface(
    chat_function,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()
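# To run outside of Spaces: `python app.py`, then open the printed local URL
# (Gradio serves on port 7860 by default).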