import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

# Read the Hugging Face access token from the environment
token = os.environ.get('HF_TOKEN')

# Load the model and tokenizer from the Hugging Face Hub
model_name = "iqrabatool/finetuned_LLaMA"
model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)

# LLaMA tokenizers ship without a pad token; reuse EOS so padding works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

def respond(message, system_message, max_tokens, temperature, top_p):
    # Prepend the system message so it actually influences generation
    prompt = f"{system_message}\n{message}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
    # max_new_tokens bounds the generated text (not the prompt length);
    # sampling must be enabled for temperature/top_p to take effect
    outputs = model.generate(
        **inputs,
        max_new_tokens=int(max_tokens),
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Slice off the prompt tokens so only the newly generated text is returned
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response

# Define the interface components: the message box plus the tuning controls
message_input = gr.Textbox(label="Message")
additional_inputs = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Slider(minimum=1, maximum=512, value=256, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=0.9, value=0.5, step=0.05, label="Top-p (nucleus sampling)"),
]

# Create the simplified interface. All five components are passed directly
# as inputs so they line up one-to-one with respond()'s parameters, and the
# example row supplies a value for each input.
demo = gr.Interface(
    fn=respond,
    inputs=[message_input] + additional_inputs,
    outputs="text",
    title="Health Bot",
    description="A simplified chatbot for health-related inquiries.",
    article="The Health Bot assists users with health-related questions and provides information based on a pre-trained language model.",
    examples=[["What are the symptoms of COVID-19?", "You are a friendly Chatbot.", 256, 0.7, 0.5]],
)

if __name__ == "__main__":
    demo.launch()
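
# Optional: a minimal smoke test of respond() without launching the UI.
# The prompt and settings below are illustrative values, not part of the
# original app; uncomment to try once the model has loaded:
# print(respond("What are the symptoms of COVID-19?",
#               "You are a friendly Chatbot.", 128, 0.7, 0.5))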