from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
import gradio as gr
import torch
from peft import PeftConfig, PeftModel

# Loading PEFT model: read the adapter config to find the base model it was trained on
PEFT_MODEL = "TurtleLiu/mistral7b_psychology_bot"
config = PeftConfig.from_pretrained(PEFT_MODEL)

# 4-bit NF4 quantization so the 7B base model fits on a single GPU
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

# Load the quantized base model, then apply and merge the LoRA adapter
peft_base_model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
model = PeftModel.from_pretrained(peft_base_model, PEFT_MODEL)
model = model.merge_and_unload()

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"


# Build a Mistral-style [INST] ... [/INST] prompt from the chat history
def format_prompt(message, history):
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


# Text-generation pipeline; max_new_tokens bounds the response length
# (the original max_length=200 conflicted with max_new_tokens=1024 and is dropped)
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    do_sample=True,
    max_new_tokens=1024,
    temperature=0.9,
    top_k=50,
    top_p=0.95,
    num_return_sequences=1,
)


# Generate response
def generate_response(message, history):
    prompt = format_prompt(message, history)
    # return_full_text=False strips the echoed prompt from the generated text
    result = pipe(prompt, return_full_text=False)[0]["generated_text"]
    return result


'''
# Alternative local-generation path (unused); would also need
# `from transformers import GenerationConfig` if re-enabled.
def generate_response(prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0, **kwargs):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    runtimeFlag = "cuda:0"
    formatted_prompt = format_prompt(f"{prompt}", history)
    inputs = tokenizer([formatted_prompt], return_tensors="pt").to(runtimeFlag)

    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        **kwargs,
    )

    generation_output = model.generate(
        **inputs,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=max_new_tokens,
    )
'''

# UI design
examples = [
    ["Patient is feeling stressed due to work and has trouble sleeping.", None, None, None, None, None],
    ["Client is dealing with relationship issues and is seeking advice on communication strategies.", None, None, None, None, None],
    ["Individual has recently experienced a loss and is having difficulty coping with grief.", None, None, None, None, None],
]

gr.ChatInterface(
    fn=generate_response,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Psychological Assistant: Expert in Assessment and Strategic Planning",
    description="Enter counseling notes to generate an assessment and plan.",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False, debug=True)

'''
# Alternative implementation (unused): stream responses from the hosted
# Inference API instead of loading the model locally.
from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient(
    "TurtleLiu/mistral7b_psychology_bot"
)


def format_prompt(message, history):
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


# Second definition overrides the first and injects the counseling instruction
def format_prompt(message, history):
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] As a psychology counselor assistant, provide an assessment and plan for the following counseling notes. Please present a summary, don't make it so long. Present in lines.: {message} [/INST]"
    return prompt


def generate(prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(f"{prompt}", history)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output


examples = [
    ["Patient is feeling stressed due to work and has trouble sleeping.", None, None, None, None, None],
    ["Client is dealing with relationship issues and is seeking advice on communication strategies.", None, None, None, None, None],
    ["Individual has recently experienced a loss and is having difficulty coping with grief.", None, None, None, None, None],
]

gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Psychological Assistant: Expert in Assessment and Strategic Planning",
    description="Enter counseling notes to generate an assessment and plan.",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False, debug=True)
'''