Question Answering
PEFT
English
medical
Tonic committed on
Commit
b324e27
1 Parent(s): 6b5ffb9

Update README.md

Files changed (1)
  1. README.md +47 -89
README.md CHANGED
@@ -66,120 +66,78 @@ See the [author's demo](https://huggingface.co/spaces/tonic/gaiaminimed)
  Use the code below to get started with the model.

  ```python
- from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
  from peft import PeftModel, PeftConfig
  import torch
  import gradio as gr

- # Define the device
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # Use model IDs as variables
  base_model_id = "tiiuae/falcon-7b-instruct"
  model_directory = "Tonic/GaiaMiniMed"

- # Instantiate the tokenizer
  tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
- tokenizer.pad_token = tokenizer.eos_token
- tokenizer.padding_side = 'left'
-
- # Generation settings, kept commented out for reference
- # peft_config = PeftConfig(
- #     max_length=500,
- #     use_cache=True,
- #     early_stopping=False,
- #     bos_token_id=tokenizer.bos_token_id,  # Use the tokenizer's BOS token ID
- #     eos_token_id=tokenizer.eos_token_id,  # Use the tokenizer's EOS token ID
- #     pad_token_id=tokenizer.eos_token_id,  # Use the tokenizer's EOS token ID
- #     temperature=0.4,
- #     do_sample=True
- # )
-
- # Load the adapter configuration (reuses the base Falcon config),
- # then load the base model and attach the PEFT adapter
- model_config = PeftConfig.from_pretrained(model_directory)
- peft_model = AutoModelForCausalLM.from_pretrained(base_model_id, config=model_config)
- peft_model = PeftModel.from_pretrained(peft_model, "Tonic/GaiaMiniMed")
-
- # Class to encapsulate the Falcon chatbot
- class FalconChatBot:
      def __init__(self, system_prompt="You are an expert medical analyst:"):
          self.system_prompt = system_prompt

-     def process_history(self, history):
-         # Filter out special commands from the history
-         filtered_history = []
-         for message in history:
-             user_message = message["user"]
-             assistant_message = message["assistant"]
-             # Keep the turn only if the user message is not a special command
-             if not user_message.startswith("Falcon:"):
-                 filtered_history.append({"user": user_message, "assistant": assistant_message})
-         return filtered_history
-
-     def predict(self, input_data, max_length=500):
-         # Extract the preprompt and conversation history from the input
-         preprompt = input_data["preprompt"]
-         history = input_data["history"]
-
-         # Process the history to remove special commands
-         processed_history = self.process_history(history)
-
-         # Format the conversation in the Falcon message format
-         conversation = f"{preprompt}\n"
-         for message in processed_history:
-             user_message = message["user"]
-             assistant_message = message["assistant"]
-             conversation += f"Falcon:{' ' + assistant_message if assistant_message else ''} User: {user_message}\n Falcon:\n"
-
-         # Encode the formatted conversation
-         input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
-
-         # Generate a response (fixed: the original called an undefined `falcon_model`)
-         response = peft_model.generate(input_ids, max_length=max_length, use_cache=True, early_stopping=True, bos_token_id=peft_model.config.bos_token_id, eos_token_id=peft_model.config.eos_token_id, pad_token_id=peft_model.config.eos_token_id, temperature=0.4, do_sample=True)
-
-         # Decode the generated response to text
          response_text = tokenizer.decode(response[0], skip_special_tokens=True)

          return response_text

- # Create the Falcon chatbot instance
- falcon_bot = FalconChatBot()
-
- # Define the Gradio interface
- title = "👋🏻Welcome to Tonic's 🦅Falcon's Medical👨🏻‍⚕️Expert Chat🚀"
- description = "You can use this Space to test out the GaiaMiniMed model [(Tonic/GaiaMiniMed)](https://huggingface.co/Tonic/GaiaMiniMed) or duplicate this Space and use it locally or on 🤗HuggingFace. [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
- examples = [{
-     "preprompt": "system message",
-     "history": [{
-         "user": "user message 1",
-         "assistant": "assistant message 1"
-     }, {
-         "user": "user message 1",
-         "assistant": None
-     }]
- }]

  iface = gr.Interface(
-     fn=falcon_bot.predict,
      title=title,
      description=description,
      examples=examples,
-     inputs=[
-         gr.inputs.Textbox(label="Input Data", type="json"),
-     ],
      outputs="text",
      theme="ParityError/Anime"
  )

- # Launch the Gradio interface for the Falcon model
  iface.launch()

  ```
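
For reference, the more conventional PEFT loading pattern keeps the base checkpoint and the adapter repo separate. Below is a minimal sketch, under the assumption that the Tonic/GaiaMiniMed repo holds adapter weights trained on top of tiiuae/falcon-7b-instruct; the `torch_dtype` choice is illustrative, not part of the original README.

```python
# Hypothetical minimal loader: base model first, then the PEFT adapter.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base_model_id = "tiiuae/falcon-7b-instruct"
adapter_id = "Tonic/GaiaMiniMed"

tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.bfloat16,  # assumption: halves memory; drop if unsupported
    trust_remote_code=True,
)
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()
```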
 
  Use the code below to get started with the model.

  ```python
+ from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, MistralForCausalLM
  from peft import PeftModel, PeftConfig
  import torch
  import gradio as gr
+ import random
+ import textwrap  # fixed: the code calls textwrap.fill, so import the module (was `from textwrap import wrap`)
+
+ # Re-wrap long lines for readable console output
+ def wrap_text(text, width=90):
+     lines = text.split('\n')
+     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
+     wrapped_text = '\n'.join(wrapped_lines)
+     return wrapped_text
+
+ # Build a single-turn prompt and generate a response
+ def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
+     formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"
+     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
+     model_inputs = encodeds.to(device)
+     # fixed: generate with `peft_model`; the original referenced an undefined `model`
+     output = peft_model.generate(
+         **model_inputs,
+         max_length=500,
+         use_cache=True,
+         early_stopping=False,
+         bos_token_id=peft_model.config.bos_token_id,
+         eos_token_id=peft_model.config.eos_token_id,
+         pad_token_id=peft_model.config.eos_token_id,
+         temperature=0.4,
+         do_sample=True
+     )
+     response_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     return response_text

+ device = "cuda" if torch.cuda.is_available() else "cpu"
  base_model_id = "tiiuae/falcon-7b-instruct"
  model_directory = "Tonic/GaiaMiniMed"

  tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
+ model_config = AutoConfig.from_pretrained(base_model_id)
+ peft_model = AutoModelForCausalLM.from_pretrained(model_directory, config=model_config)
+ peft_model = PeftModel.from_pretrained(peft_model, model_directory)
+
+ # Simple chatbot wrapper that keeps a running conversation history
+ class ChatBot:
      def __init__(self, system_prompt="You are an expert medical analyst:"):
          self.system_prompt = system_prompt
+         self.history = []

+     def predict(self, user_input, system_prompt):
+         formatted_input = f"{{{{ {self.system_prompt} }}}}\nUser: {user_input}\nFalcon:"
+         input_ids = tokenizer.encode(formatted_input, return_tensors="pt", add_special_tokens=False)
+         response = peft_model.generate(input_ids, max_length=900, use_cache=False, early_stopping=False, bos_token_id=peft_model.config.bos_token_id, eos_token_id=peft_model.config.eos_token_id, pad_token_id=peft_model.config.eos_token_id, temperature=0.4, do_sample=True)
          response_text = tokenizer.decode(response[0], skip_special_tokens=True)
+         self.history.append(formatted_input)
+         self.history.append(response_text)

          return response_text

+ bot = ChatBot()
+
+ title = "👋🏻Welcome to Tonic's GaiaMiniMed Chat🚀"
+ description = "You can use this Space to test out the current model [(Tonic/GaiaMiniMed)](https://huggingface.co/Tonic/GaiaMiniMed) or duplicate this Space and use it locally or on 🤗HuggingFace. [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
+ examples = [["What is the proper treatment for buccal herpes?", "You are a medicine and public health expert, you will receive a question, answer the question, and provide a complete answer"]]
 
  iface = gr.Interface(
+     fn=bot.predict,
      title=title,
      description=description,
      examples=examples,
+     inputs=["text", "text"],
      outputs="text",
      theme="ParityError/Anime"
  )

  iface.launch()

  ```
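
The Gradio app is optional; for a quick smoke test you can call the chatbot directly after running the setup above. A minimal sketch (the example question mirrors the `examples` entry and is purely illustrative):

```python
# Direct call without the Gradio UI; arguments mirror the Interface inputs above.
bot = ChatBot()
answer = bot.predict(
    "What is the proper treatment for buccal herpes?",
    "You are a medicine and public health expert, you will receive a question, answer the question, and provide a complete answer",
)
print(wrap_text(answer))
```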