Update README.md
README.md
```diff
@@ -84,7 +84,7 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
     formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"
     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
     model_inputs = encodeds.to(device)
-    output =
+    output = peft_model.generate(
         **model_inputs,
         max_length=500,
         use_cache=True,
```
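The fix here completes a dangling `output =` assignment into a full `generate()` call. For context, a minimal self-contained sketch of the same pattern; the `sshleifer/tiny-gpt2` checkpoint is only a stand-in for the fine-tuned Falcon PEFT model, which this diff does not name:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Stand-in checkpoint; the README loads a fine-tuned Falcon PEFT model instead.
model_id = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

system_prompt = "You are an expert medical analyst:"
user_input = "What causes iron-deficiency anemia?"
formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"

encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
model_inputs = encodeds.to(device)

# Unpack the tokenizer output (input_ids + attention_mask) into generate(),
# as the repaired line in the diff does.
output = model.generate(
    **model_inputs,
    max_length=500,
    use_cache=True,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```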
```diff
@@ -107,6 +107,7 @@ tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True,
 model_config = AutoConfig.from_pretrained(base_model_id)
 peft_model = AutoModelForCausalLM.from_pretrained(model_directory, config=model_config)
 peft_model = PeftModel.from_pretrained(peft_model, model_directory)
+
 class ChatBot:
     def __init__(self, system_prompt="You are an expert medical analyst:"):
         self.system_prompt = system_prompt
```
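This hunk only adds a blank line, but the loading order around it is the usual PEFT pattern: load the model weights first, then attach the adapter with `PeftModel.from_pretrained`. A sketch of that pattern with hypothetical values for `base_model_id` and `model_directory`, since the diff does not show the real ones:

```python
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "tiiuae/falcon-7b"          # hypothetical; the README sets this earlier
model_directory = "./falcon-medical-lora"   # hypothetical path to the saved adapter

tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)

# Mirror the README: build the config from the base model id, load weights,
# then wrap them with the adapter stored in the same directory.
model_config = AutoConfig.from_pretrained(base_model_id)
peft_model = AutoModelForCausalLM.from_pretrained(model_directory, config=model_config)
peft_model = PeftModel.from_pretrained(peft_model, model_directory)
peft_model.eval()  # inference only; no gradient updates needed
```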
```diff
@@ -115,11 +116,10 @@ class ChatBot:
     def predict(self, user_input, system_prompt):
         formatted_input = f"{{{{ {self.system_prompt} }}}}\nUser: {user_input}\nFalcon:"
         input_ids = tokenizer.encode(formatted_input, return_tensors="pt", add_special_tokens=False)
-        response = peft_model.generate(input_ids, max_length=900, use_cache=False, early_stopping=False, bos_token_id=peft_model.config.bos_token_id, eos_token_id=peft_model.config.eos_token_id, pad_token_id=peft_model.config.eos_token_id, temperature=0.4, do_sample=True)
+        response = peft_model.generate(input_ids=input_ids, max_length=900, use_cache=False, early_stopping=False, bos_token_id=peft_model.config.bos_token_id, eos_token_id=peft_model.config.eos_token_id, pad_token_id=peft_model.config.eos_token_id, temperature=0.4, do_sample=True)
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
         self.history.append(formatted_input)
         self.history.append(response_text)
-
         return response_text
 
 bot = ChatBot()
```
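The functional change in this hunk is passing the tensor by keyword (`input_ids=input_ids`) instead of positionally, plus dropping a stray blank line before the return. A short hypothetical usage of the class; note that `predict()` builds the prompt from `self.system_prompt`, so its `system_prompt` parameter is accepted but unused in the lines shown:

```python
# Hypothetical usage of the ChatBot defined above.
bot = ChatBot()
reply = bot.predict("What are common causes of fatigue?", bot.system_prompt)
print(reply)
```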
```diff
@@ -133,7 +133,7 @@ iface = gr.Interface(
     title=title,
     description=description,
     examples=examples,
-    inputs=["text", "text"],
+    inputs=["text", "text"],
     outputs="text",
     theme="ParityError/Anime"
 )
```
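The `inputs=["text", "text"]` line matches the two-argument `predict(user_input, system_prompt)` signature. A minimal runnable sketch of the same `gr.Interface` wiring; `title`, `description`, `examples`, and the stub `predict` are placeholders, since the real ones are defined earlier in the README and outside this diff:

```python
import gradio as gr

# Placeholders; the README defines these earlier, outside this diff.
title = "Falcon Medical ChatBot"
description = "Ask medical questions; answers come from a LoRA-tuned Falcon model."
examples = [["What causes anemia?", "You are an expert medical analyst:"]]

def predict(user_input, system_prompt):
    # Stub standing in for ChatBot.predict from the README.
    return f"(stub) would answer: {user_input}"

iface = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "text"],  # two text boxes: user input and system prompt
    outputs="text",
    theme="ParityError/Anime",
)

if __name__ == "__main__":
    iface.launch()
```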