import os
from typing import List, Tuple

import openai  # Assuming you're using OpenAI's API (make sure to install the openai package)
from flask import Flask, request, jsonify

# Initialize Flask app
app = Flask(__name__)

# Set the OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

# Define a system message
SYSTEM_MESSAGE = "You are a helpful assistant."


# Function to generate AI response
def generate_response(
    user_input: str,
    history: List[Tuple[str, str]],
    max_tokens: int = 150,
    temperature: float = 0.7,
    top_p: float = 1.0
) -> str:
    """
    Generates a response from the AI model.

    Args:
        user_input: The user's input message.
        history: A list of (user input, AI response) tuples representing the conversation so far.
        max_tokens: The maximum number of tokens in the generated response.
        temperature: Controls the randomness of the generated response.
        top_p: Controls the nucleus sampling probability.

    Returns:
        str: The generated response from the AI model.
    """
    try:
        # Build the message list with the system message and conversation history
        messages = [{"role": "system", "content": SYSTEM_MESSAGE}]

        # Iterate through the history list and format it as chat messages
        for user_message, assistant_message in history:
            messages.append({"role": "user", "content": user_message})
            messages.append({"role": "assistant", "content": assistant_message})

        # Add the current user input
        messages.append({"role": "user", "content": user_input})

        # Generate the response from the model, streaming tokens as they arrive
        # (this uses the pre-1.0 openai SDK interface, i.e. openai.ChatCompletion)
        response = ""
        for msg in openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # You can use any chat model you prefer
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True
        ):
            # Check that 'choices' is present and non-empty
            if msg and 'choices' in msg and msg['choices']:
                # Ensure the 'delta' and 'content' fields exist before using them
                token = msg['choices'][0].get('delta', {}).get('content', '')
                if token:
                    response += token
            else:
                # Handle an unexpected response format or empty 'choices'
                print("Warning: Unexpected response format or empty 'choices'.")
                break

        return response or "Sorry, I couldn't generate a response. Please try again."

    except Exception as e:
        # Log the error for debugging purposes
        print(f"An error occurred: {e}")
        return "Error: An unexpected error occurred while processing your request."


# Route to handle user input and generate responses
@app.route("/chat", methods=["POST"])
def chat():
    try:
        # Get the user input and history from the JSON request body
        # (get_json(silent=True) avoids raising if the body is missing or not JSON)
        data = request.get_json(silent=True) or {}
        user_input = data.get("user_input", "")
        history = data.get("history", [])

        # Generate the AI response
        response = generate_response(
            user_input=user_input,
            history=history
        )

        # Return the response as JSON
        return jsonify({"response": response})

    except Exception as e:
        return jsonify({"error": str(e)}), 500


if __name__ == "__main__":
    # Run the app
    app.run(debug=True, host="0.0.0.0", port=5000)
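
# ---------------------------------------------------------------------------
# Example usage -- a minimal sketch, assuming the server above is running
# locally on port 5000 and (for the Python variant) that the third-party
# "requests" package is installed. The payload shape mirrors what the /chat
# route reads: "user_input" is a string, "history" is a list of
# [user_message, assistant_message] pairs.
#
#   import requests
#
#   payload = {
#       "user_input": "What is the capital of France?",
#       "history": [["Hello", "Hi! How can I help you today?"]],
#   }
#   resp = requests.post("http://localhost:5000/chat", json=payload)
#   print(resp.json()["response"])
#
# Or with curl:
#
#   curl -X POST http://localhost:5000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"user_input": "Hello!", "history": []}'
# ---------------------------------------------------------------------------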