import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Initialize paths and model identifiers for easy configuration and maintenance
filename = "output_topic_details.txt"  # Path to the file storing music-specific details
retrieval_model_name = 'output/sentence-transformer-finetuned/'

openai.api_key = os.environ["OPENAI_API_KEY"]

# Initial system message to set the behavior of the assistant
system_message = "You are a mental music chatbot specialized in providing curated music playlists according to a user's input."
messages = [{"role": "system", "content": system_message}]

# Attempt to load the necessary models and provide feedback on success or failure
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")


def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file, removing empty lines and stripping whitespace.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except Exception as e:
        print(f"Failed to load or preprocess text: {e}")
        return []


segments = load_and_preprocess_text(filename)


def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query using cosine similarity among sentence embeddings.
    This version finds the best match based on the content of the query.
    """
    try:
        # Lowercase the query for better matching
        lower_query = user_query.lower()

        # Encode the query and the segments
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)

        # Compute cosine similarities between the query and the segments
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]

        # Find the index of the most similar segment
        best_idx = similarities.argmax()

        # Return the most relevant segment
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""


def generate_response(user_query, relevant_segment):
    """
    Generate a response emphasizing the bot's capability in providing music recommendations.
    """
    try:
        # Include both the user's question and the retrieved context so the model can answer the actual query
        user_message = f"User question: {user_query}\nHere's the information on music recommendations: {relevant_segment}"

        # Append user's message to messages list
        messages.append({"role": "user", "content": user_message})

        response = openai.ChatCompletion.create(  # uses the openai<1.0 ChatCompletion API
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=300,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )

        # Extract the response text
        output_text = response['choices'][0]['message']['content'].strip()

        # Append assistant's message to messages list for context
        messages.append({"role": "assistant", "content": output_text})

        return output_text
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"


def get_youtube_playlist(question):
    """
    Check if the question contains a keyword and return the corresponding YouTube playlist link.
""" keyword_links = { "sad": "Here's a Youtube playlist for when you're feeling sad: https://www.youtube.com/playlist?list=PLPE5bssIbSTm3odLzXrlfCXnmpll8pqBu", "hype": "Get hyped with this energetic Youtube playlist this upbeat Youtube playlist for when you're feeling happy: https://www.youtube.com/playlist?list=PLPE5bssIbSTnsN_-gd3-2sgHxm7i4CSSS", "mad":"Enjoy this energtic playlist when you are pressed:https://www.youtube.com/playlist?list=PLPE5bssIbSTnwU9lG7jdnuqzoKUKFbUGb", "chill": "Here is a playlist for a chill vibe: https://www.youtube.com/playlist?list=PLPE5bssIbSTkjkb4FfYcaI3tVtfE3IrAx", "delulu": "Here is a playlist for when you are yearning for someone or dealing with romance: https://www.youtube.com/playlist?list=PLPE5bssIbSTlfj8IbqgD2SRVh83Hs1TZG", "it_girl": "Here is a playlist for when you are feeling your best and are confident:https://www.youtube.com/playlist?list=PLPE5bssIbSTlPeSO5BbhKiQZ1RXVCz-id" } for keyword, link in keyword_links.items(): if keyword in question.lower(): return link # If no keyword matches, return None or handle accordingly return None def query_model(question): """ Process a question, find relevant information, and generate a response. """ youtube_playlist = get_youtube_playlist(question) if youtube_playlist: return f"Here is a link to the youtube playlist for more recs! Just copy this link: {youtube_playlist}" if question == "": return "Welcome to Mental Music Bot! Ask me anything about music recommendations!" relevant_segment = find_relevant_segment(question, segments) if not relevant_segment: return "Could not find specific information. Please refine your question." response = generate_response(question, relevant_segment) return response # Define the welcome message and specific topics the chatbot can provide information about welcome_message = """ # 🎶 Welcome to Mental Music Bot! ## Your AI-driven friend for all music recommendations based off your emotions. Created by Sarem, Davina, and Brea of the 2024 Kode With Klossy DC Camp. """ topics = """ ### Feel Free to ask me anything from the topics below! - music recommendations for when you're feeling sad - music recommendations for when you're feeling happy - music recommendations for when you're feeling confident """ # Setup the Gradio Blocks interface with custom layout components with gr.Blocks(theme='freddyaboulton/dracula_revamped') as demo: gr.Image("https://huggingface.co/spaces/MentalMusicBot-1/MentalMusicBot/resolve/main/BDS%20mental%20music%20bot%20-%20KWK%20Pitch%20Party.jpg", show_label = False, show_share_button = False, show_download_button = False) gr.Markdown(welcome_message) # Display the formatted welcome message with gr.Row(): with gr.Column(): gr.Markdown(topics) # Show the topics on the left side with gr.Row(): with gr.Column(): question = gr.Textbox(label="Your question", placeholder="How are you feeling today?") answer = gr.Textbox(label="Mental Music Bot Response", placeholder="Mental Music Bot will respond here...", interactive=False, lines=10) submit_button = gr.Button("Submit") submit_button.click(fn=query_model, inputs=question, outputs=answer) # Launch the Gradio app to allow user interaction demo.launch(share=True)