import os

import gradio as gr
import openai

# Set up the OpenAI client (legacy openai<1.0 interface) with the custom Groq endpoint
openai.api_key = os.getenv("PrepBuddy_API_KEY")
openai.api_base = "https://api.groq.com/openai/v1"


# Function to get the Groq model's response
def get_groq_response(message, mode):
    try:
        # Use a system prompt tailored to the selected mode
        motivational_message = (
            "Keep pushing forward! You've got this. Programming might seem tough at first, but every step you take "
            "is one step closer to mastering it. Let's score 70 on 70 in programming together!"
        )
        if mode == "Code":
            system_prompt = (
                "You are GS C PrepBuddy, a friendly and motivational AI specializing in C programming. "
                "Your goal is to assist users by providing clear, concise, and well-commented C code "
                "to solve their problems. Explain the logic where necessary. " + motivational_message
            )
        elif mode == "Flowchart":
            system_prompt = (
                "You are GS C PrepBuddy, a friendly and motivational AI specializing in C programming. "
                "Provide a detailed textual description of flowcharts to represent solutions for C programming problems. "
                + motivational_message
            )
        elif mode == "Algorithm":
            system_prompt = (
                "You are GS C PrepBuddy, a friendly and motivational AI specializing in C programming. "
                "Provide step-by-step algorithms to solve the user's query with clarity. " + motivational_message
            )
        elif mode == "Exam Preparation":
            system_prompt = (
                "You are GS C PrepBuddy, a friendly and motivational AI dedicated to helping students prepare for their exams. "
                "Provide explanations, theoretical concepts, sample questions, and problem-solving techniques in C programming. "
                + motivational_message
            )
        else:
            system_prompt = "You are GS C PrepBuddy, a helpful and motivational AI assistant."

        response = openai.ChatCompletion.create(
            model="llama-3.1-70b-versatile",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": message},
            ],
        )
        return response.choices[0].message["content"]
    except Exception as e:
        return f"Error: {str(e)}"


# Function to handle chatbot interactions
def chatbot(user_input, output_format, history=None):
    # Avoid a mutable default argument; start a fresh history when none is passed
    history = history or []
    try:
        # Get the response based on the selected output format
        bot_response = get_groq_response(user_input, output_format)

        # Append to conversation history
        history.append((user_input, bot_response))
        return history, history  # Return updated chat history and state
    except Exception as e:
        return [(user_input, f"Error: {str(e)}")], history


# Gradio Interface setup
chat_interface = gr.Interface(
    fn=chatbot,  # Function to call for chatbot interaction
    inputs=[
        "text",  # User input
        gr.Dropdown(
            ["Code", "Flowchart", "Algorithm", "Exam Preparation"],
            label="Output Format",
            value="Code",
        ),
        "state",  # Chat history
    ],
    outputs=["chatbot", "state"],  # Outputs: the chat and updated history (state)
    live=False,  # Disable live chat, responses shown after submit
    title="GS C PrepBuddy",  # Title of the app
    description=(
        "Welcome to GS Programming PrepBuddy! 💻✨\n\n"
        "Let's make programming fun and score 70 on 70 in your C programming exams!\n\n"
        "Choose your output format—Code, Flowchart, Algorithm, or Exam Preparation.\n\n"
        "Ask your query, and I'll guide you every step of the way!\n\n"
        "Made by Satyam Singhal"
    ),
)

# Launch the Gradio interface
if __name__ == "__main__":
    chat_interface.launch()
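
# ---------------------------------------------------------------------------
# Setup notes: a sketch of the assumed environment, not verified against any
# particular deployment.
#   - The script uses the legacy openai<1.0 SDK (e.g. `pip install openai==0.28.1`)
#     together with the classic gr.Interface "state" pattern from Gradio 3.x
#     (`pip install gradio`).
#   - Export your Groq API key before launching the app:
#         export PrepBuddy_API_KEY="your-groq-api-key"
#
# Optional smoke test: uncomment the line below to query the model once from
# the command line instead of launching the UI (assumes the key and endpoint
# configured above are valid; the sample prompt is purely illustrative).
#
#     print(get_groq_response("Write a C program to reverse a string.", "Code"))
# ---------------------------------------------------------------------------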