"""Gradio demo app: chat-style text generation with a fine-tuned GPT-2 model.

Loads the "Yoxas/autotrain-gpt2-statistical1" causal-LM checkpoint once at
startup, wraps it in a transformers text-generation pipeline, and serves a
single-textbox Gradio interface around it.
"""

import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Single source of truth for the checkpoint id (was duplicated in two calls).
MODEL_ID = "Yoxas/autotrain-gpt2-statistical1"

# Load the tokenizer and model once, at module import time.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# High-level helper that handles tokenization, generation, and decoding.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


@spaces.GPU(duration=120)  # request a GPU for up to 120s per call (HF Spaces)
def chatbot(input_text: str) -> str:
    """Generate a continuation of *input_text* with the fine-tuned model.

    Args:
        input_text: Prompt text entered by the user.

    Returns:
        The generated text. NOTE: per pipeline semantics this includes the
        prompt itself, and ``max_length=150`` caps prompt + generated tokens
        combined.
    """
    response = pipe(input_text, max_length=150, num_return_sequences=1)
    return response[0]["generated_text"]


# Minimal single-textbox UI around the handler.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Research Paper Abstract Chatbot",
)

# Launch unconditionally at import, matching the original script's behavior
# (HF Spaces executes this file directly).
interface.launch()