nicolaakmal
committed on
Commit
•
7ed607e
1
Parent(s):
c35d40d
Update app.py
Browse files
app.py
CHANGED
@@ -151,19 +151,27 @@
|
|
151 |
from transformers import pipeline
|
152 |
import gradio as gr
|
153 |
|
|
|
|
|
|
|
154 |
def generate_response(inputs):
|
155 |
messages = [
|
156 |
{"role": "user", "content": inputs},
|
157 |
]
|
158 |
-
|
159 |
-
|
|
|
|
|
|
|
160 |
|
161 |
# Antarmuka Gradio
|
162 |
iface = gr.Interface(
|
163 |
fn=generate_response,
|
164 |
inputs="text",
|
165 |
outputs="text",
|
166 |
-
title="Chatbot LLaMA Finetuned"
|
|
|
|
|
167 |
)
|
168 |
|
169 |
# Jalankan aplikasi Gradio
|
@@ -173,3 +181,4 @@ iface.launch()
|
|
173 |
|
174 |
|
175 |
|
|
|
|
151 |
from transformers import pipeline
|
152 |
import gradio as gr
|
153 |
|
154 |
+
# Load the text-generation pipeline once at import time so every request
# reuses the already-initialized model instead of reloading it per call.
MODEL_ID = "nicolaakmal/llama32-lora-finetuned-v3-fp16"
pipe = pipeline("text-generation", model=MODEL_ID)
|
157 |
def generate_response(inputs):
    """Generate a model reply for a single user prompt.

    Parameters
    ----------
    inputs : str
        The user's prompt text, wrapped into a one-turn chat message list.

    Returns
    -------
    str
        The generated reply with surrounding whitespace stripped, or the
        fallback message "No response generated" when the pipeline returns
        an empty result.
    """
    messages = [
        {"role": "user", "content": inputs},
    ]
    # Generate the response using the module-level pipeline.
    response = pipe(messages)
    if not response:
        return "No response generated"
    generated_text = response[0]["generated_text"]
    # BUG FIX: for chat-style input (a list of role/content dicts), the
    # text-generation pipeline returns the whole conversation as a list of
    # message dicts, not a plain string — the original code then crashed on
    # generated_text.strip(). Pull out the final (assistant) turn's content
    # in that case; plain-string results pass through unchanged.
    if isinstance(generated_text, list):
        generated_text = generated_text[-1].get("content", "")
    return generated_text.strip()  # remove any leading/trailing whitespace
|
166 |
|
167 |
# Gradio user interface: a single text box in, generated reply out.
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Chatbot LLaMA Finetuned",
    description="A fine-tuned LLaMA model. Enter your questions or prompts below.",
    theme="default",  # optional: other themes such as 'huggingface' or 'dark' also work
)
|
176 |
|
177 |
# Jalankan aplikasi Gradio
|
|
|
181 |
|
182 |
|
183 |
|
184 |
+
|