import gradio as gr
from huggingface_hub import InferenceClient
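# Client for the Hugging Face Inference API; this assumes the
# "Grandediw/lora_model" repo is deployed and serving text-generation requests.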
client = InferenceClient("Grandediw/lora_model")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the prompt from the system message and the conversation history.
    # With type='tuples', history is a list of (user_message, assistant_message) pairs.
    prompt = system_message.strip() + "\n\n"
    for user_msg, assistant_msg in history:
        if user_msg:
            prompt += f"User: {user_msg}\n"
        if assistant_msg:
            prompt += f"Assistant: {assistant_msg}\n"

    # Add the latest user message and cue the model to answer
    prompt += f"User: {message}\nAssistant:"

    response = ""

    # Use text_generation instead of chat_completion. details=True is needed so
    # each streamed chunk carries token metadata rather than being a plain string.
    for partial in client.text_generation(
        prompt=prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
        details=True,
    ):
        # partial is a TextGenerationStreamOutput
        # (named TextGenerationStreamResponse in older huggingface_hub releases)
        token = partial.token.text  # Extract the generated token text
        response += token
        yield response
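
# For illustration only (hypothetical values): with system_message "You are helpful.",
# history [("Hi", "Hello!")], and message "How are you?", respond() builds:
#
#   You are helpful.
#
#   User: Hi
#   Assistant: Hello!
#   User: How are you?
#   Assistant: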

with gr.Blocks(title="Text Generation Interface") as demo:
    gr.Markdown("# LoRA Text Generation Demo")

    with gr.Column():
        system_message = gr.Textbox(
            value="You are a helpful and friendly assistant.",
            label="System Prompt",
            lines=3,
        )
        max_tokens = gr.Slider(
            minimum=1, maximum=2048, value=512, step=1,
            label="Max new tokens"
        )
        temperature = gr.Slider(
            minimum=0.1, maximum=4.0, value=0.7, step=0.1,
            label="Temperature"
        )
        top_p = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.95, step=0.05,
            label="Top-p"
        )
    # type='tuples' keeps the old-style (user, assistant) history format that
    # respond() expects; omit it to use the default message format instead.
    chat = gr.ChatInterface(
        fn=respond,
        additional_inputs=[system_message, max_tokens, temperature, top_p],
        type='tuples',
    )

if __name__ == "__main__":
    # Streaming output relies on Gradio's queue, enabled by default in recent versions.
    demo.launch()