Commit af0e4ed (parent a43c59b): Update app.py
app.py CHANGED

@@ -169,16 +169,27 @@ peft_config = LoraConfig(
 # model = GPT4All(model_path)
 
 def generate_text(prompt):
-    # result = model.generate(prompt)
-    # return result
-
-    # prompt = input()
-
-
-
-
-
-
+    # # result = model.generate(prompt)
+    # # return result
+    # logging.set_verbosity(logging.CRITICAL)
+    # # prompt = input()
+    # additional_prompt = "You are an AI Medical customer care bot. Please provide detailed and complete answers for only medical questions."
+    # prompt = additional_prompt + prompt
+    # pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
+    # result = pipe(f"<s>[INST] {prompt} [/INST]")
+    # output = result[0]['generated_text']
+    # question = row['Question']
+    # print(question)
+
+    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
+    result = pipe(f"<s>[INST] {prompt} [/INST]")
+    generated_text = result[0]['generated_text']
+
+    split_text = generated_text.split("[/INST]")
+    generated_content = split_text[1].strip()
+
+    prediction = generated_content.split("[/]")[0]
+    return prediction
 
 text_generation_interface = gr.Interface(
     fn=generate_text,
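For readability, here is a minimal self-contained sketch of what app.py's generation path looks like after this commit. It is a reconstruction under assumptions, not the repository's exact code: the checkpoint name NousResearch/Llama-2-7b-chat-hf is a stand-in (app.py loads its own fine-tuned model and tokenizer via the LoRA/PEFT setup referenced in the hunk context), and the inputs/outputs arguments of gr.Interface are guesses, since the diff cuts off after fn=generate_text,.

    # Sketch under assumptions: stand-in checkpoint, guessed Interface inputs/outputs.
    import gradio as gr
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

    model_name = "NousResearch/Llama-2-7b-chat-hf"  # stand-in; app.py uses its own fine-tuned model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def generate_text(prompt):
        # Build a generation pipeline and wrap the prompt in the Llama-2 [INST] template.
        pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
        result = pipe(f"<s>[INST] {prompt} [/INST]")
        generated_text = result[0]["generated_text"]
        # The pipeline echoes the prompt, so keep only the text after the [/INST] tag,
        # then trim anything after a stray "[/]" marker, as the commit does.
        generated_content = generated_text.split("[/INST]")[1].strip()
        return generated_content.split("[/]")[0]

    text_generation_interface = gr.Interface(
        fn=generate_text,
        inputs=gr.Textbox(label="Prompt"),    # assumption: the diff ends at fn=generate_text,
        outputs=gr.Textbox(label="Response"),
    )

    if __name__ == "__main__":
        text_generation_interface.launch()

One design note: as committed, the pipeline is rebuilt inside generate_text on every request; hoisting it to module scope would avoid repeating that setup cost per call.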