import gradio as gr
from transformers import pipeline

model = "meta-llama/Meta-Llama-3-8B"
generator = pipeline(task="text-generation", model=model)

# Tone presets mapped to the phrasing injected into the paraphrase prompt.
tones = {
    "natural": "human, authentic",
    "fluency": "readable, clarified",
    "formal": "sophisticated",
    "academic": "technical and scholarly",
    "simple": "simple and easily understandable",
}


def generate(text, max_new_tokens):
    # Return the pipeline's single generated sequence.
    return generator(text, max_new_tokens=max_new_tokens, num_return_sequences=1)


def respond(message, history, tone="natural", max_new_tokens=512):
    # gr.ChatInterface calls this as fn(message, history, *additional_inputs),
    # so `tone` and `max_new_tokens` are filled from the components below.
    prompt = f"Paraphrase this text in a more {tones[tone]} way: {message}"
    output = generate(prompt, max_new_tokens)
    text = output[0]["generated_text"]
    # The pipeline echoes the prompt at the start of the output; strip it
    # so only the paraphrase is shown in the chat.
    if text.startswith(prompt):
        text = text[len(prompt):].strip()
    return text


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(choices=list(tones), value="natural", label="Tone"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    ],
)

if __name__ == "__main__":
    demo.launch()