import gradio as gr
from transformers import pipeline

# Load the model and tokenizer via the pipeline API. grammarly/coedit-large is
# a seq2seq (FLAN-T5-based) model, so it needs the text2text-generation task
# rather than text-generation.
model_pipeline = pipeline("text2text-generation", model="grammarly/coedit-large")

def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
    # do_sample=True is required for temperature/top_p/top_k to take effect;
    # max_new_tokens bounds the output length directly, with no need to pad
    # max_length by the input word count.
    output = model_pipeline(input_text, do_sample=True, temperature=temperature,
                            max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k)
    # Extract and return the generated text
    return output[0]["generated_text"]
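# CoEdIT is instruction-tuned for text editing, so inputs work best with a
# task prefix (an assumption based on the model card's examples), e.g.:
#   generate_text("Fix the grammar: When I grow up, I start to understand what he said is quite right.")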
# Define the Gradio interface. The gr.inputs/gr.outputs namespaces were
# removed in Gradio 3+; components are used directly and take value= in
# place of default=.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Text"),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.9, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, step=1, value=50, label="Max New Tokens"),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.95, label="Top-p"),
        gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with Grammarly Model",
)
# Launch the interface
iface.launch()
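# Note: on Spaces the script is executed as-is and launch() serves the app;
# when testing locally, iface.launch(share=True) would additionally create a
# temporary public URL.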