import os
import logging
import random
from botbuilder.core import TurnContext, MessageFactory
from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes
from tenacity import retry, wait_random_exponential, stop_after_attempt
import importlib
from sentiment_analysis import analyze_sentiment_vader
from config import load_and_validate_config, setup_logging
from universal_reasoning import UniversalReasoning
from dotenv import load_dotenv
import json
from chat import azure_chat_completion_request # Import the function from chat.py
from database import DatabaseConnection # Import the database connection
# Load environment variables from .env file
load_dotenv()
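# The config block in MyBot.__init__ below reads these environment variables.
# A minimal .env sketch (placeholder values, not real credentials):
#
#   AZURE_OPENAI_API_KEY=<your-azure-openai-key>
#   AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
#   LUIS_ENDPOINT=https://<your-region>.api.cognitive.microsoft.com/
#   LUIS_API_VERSION=<api-version>
#   LUIS_API_KEY=<your-luis-key>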
class MyBot:
    def __init__(self, conversation_state, user_state, dialog, universal_reasoning):
        self.conversation_state = conversation_state
        self.user_state = user_state
        self.dialog = dialog
        self.universal_reasoning = universal_reasoning
        self.context = {}
        self.feedback = []
        # Perspective generators used by generate_response; assumed to be exposed
        # by the injected UniversalReasoning instance (empty dict otherwise).
        self.perspectives = getattr(universal_reasoning, "perspectives", {})
        config = load_and_validate_config('config.json', 'config_schema.json')
        # Add Azure OpenAI and LUIS configurations to the config
        config['azure_openai_api_key'] = os.getenv('AZURE_OPENAI_API_KEY')
        config['azure_openai_endpoint'] = os.getenv('AZURE_OPENAI_ENDPOINT')
        config['luis_endpoint'] = os.getenv('LUIS_ENDPOINT')
        config['luis_api_version'] = os.getenv('LUIS_API_VERSION')
        config['luis_api_key'] = os.getenv('LUIS_API_KEY')
        setup_logging(config)
    async def enhance_context_awareness(self, user_id: str, text: str) -> None:
        """Enhance context awareness by analyzing the user's environment, activities, and emotional state."""
        sentiment = analyze_sentiment_vader(text)
        if user_id not in self.context:
            self.context[user_id] = []
        self.context[user_id].append({"text": text, "sentiment": sentiment})

    async def proactive_learning(self, user_id: str, feedback: str) -> None:
        """Encourage proactive learning by seeking feedback and exploring new topics."""
        if user_id not in self.context:
            self.context[user_id] = []
        self.context[user_id].append({"feedback": feedback})
        self.feedback.append({"user_id": user_id, "feedback": feedback})

    async def ethical_decision_making(self, user_id: str, decision: str) -> None:
        """Integrate ethical principles into decision-making processes."""
        ethical_decision = f"Considering ethical principles, the decision is: {decision}"
        if user_id not in self.context:
            self.context[user_id] = []
        self.context[user_id].append({"ethical_decision": ethical_decision})

    async def emotional_intelligence(self, user_id: str, text: str) -> str:
        """Develop emotional intelligence by recognizing and responding to user emotions."""
        sentiment = analyze_sentiment_vader(text)
        response = self.generate_emotional_response(sentiment, text)
        if user_id not in self.context:
            self.context[user_id] = []
        self.context[user_id].append({"emotional_response": response})
        return response

    def generate_emotional_response(self, sentiment: dict, text: str) -> str:
        """Generate an empathetic response based on the sentiment analysis."""
        if sentiment['compound'] >= 0.05:
            return "I'm glad to hear that! 😊 How can I assist you further?"
        elif sentiment['compound'] <= -0.05:
            return "I'm sorry to hear that. 😢 Is there anything I can do to help?"
        else:
            return "I understand. How can I assist you further?"

    async def transparency_and_explainability(self, user_id: str, decision: str) -> str:
        """Enable transparency by explaining the reasoning behind decisions."""
        # Ensure the user's context exists before it is referenced in the explanation.
        if user_id not in self.context:
            self.context[user_id] = []
        explanation = f"The decision was made based on the following context: {self.context[user_id]}"
        self.context[user_id].append({"explanation": explanation})
        return explanation
    async def on_message_activity(self, turn_context: TurnContext) -> None:
        """Handles incoming messages and generates responses."""
        user_id = turn_context.activity.from_property.id
        if user_id not in self.context:
            self.context[user_id] = []
        try:
            if "end" in turn_context.activity.text.lower() or "stop" in turn_context.activity.text.lower():
                await end_conversation(turn_context)
                self.context.pop(user_id, None)
            else:
                self.context[user_id].append(turn_context.activity.text)
                response = await self.generate_response(turn_context.activity.text, user_id)
                await turn_context.send_activity(MessageFactory.text(response))
                await self.request_feedback(turn_context, user_id)
                # Example database operation: persist the message (query parameters passed as a tuple).
                with DatabaseConnection() as conn:
                    if conn:
                        cursor = conn.cursor()
                        cursor.execute(
                            "INSERT INTO UserMessages (UserId, Message) VALUES (?, ?)",
                            (user_id, turn_context.activity.text),
                        )
                        conn.commit()
        except Exception as e:
            await handle_error(turn_context, e)
    async def generate_response(self, text: str, user_id: str) -> str:
        """Generates a response using Azure OpenAI's API, Universal Reasoning, and various perspectives."""
        try:
            logging.info(f"Generating response for user_id: {user_id} with text: {text}")
            # Generate responses from different perspectives
            responses = []
            for perspective in self.perspectives.values():
                try:
                    response = await perspective.generate_response(text)
                    responses.append(response)
                except Exception as e:
                    logging.error(f"Error generating response from {perspective.__class__.__name__}: {e}")
            # Combine responses
            combined_response = "\n".join(responses)
            logging.info(f"Combined response: {combined_response}")
            return combined_response
        except Exception as e:
            logging.error(f"Error generating response: {e}")
            return "Sorry, I couldn't generate a response at this time."

    async def request_feedback(self, turn_context: TurnContext, user_id: str) -> None:
        """Request feedback from the user about the bot's response."""
        feedback_prompt = "How would you rate my response? (good/neutral/bad)"
        await turn_context.send_activity(MessageFactory.text(feedback_prompt))

    async def handle_feedback(self, turn_context: TurnContext) -> None:
        """Handle user feedback and store it for future analysis."""
        user_id = turn_context.activity.from_property.id
        feedback = turn_context.activity.text.lower()
        if feedback in ["good", "neutral", "bad"]:
            self.feedback.append({"user_id": user_id, "feedback": feedback})
            await turn_context.send_activity(MessageFactory.text("Thank you for your feedback!"))
        else:
            await turn_context.send_activity(MessageFactory.text("Please provide feedback as 'good', 'neutral', or 'bad'."))
async def end_conversation(turn_context: TurnContext) -> None:
    """Ends the conversation with the user."""
    await turn_context.send_activity(
        MessageFactory.text("Ending conversation from the skill...")
    )
    end_of_conversation = Activity(type=ActivityTypes.end_of_conversation)
    end_of_conversation.code = EndOfConversationCodes.completed_successfully
    await turn_context.send_activity(end_of_conversation)

async def handle_error(turn_context: TurnContext, error: Exception) -> None:
    """Handles errors by logging them and notifying the user."""
    logging.error(f"An error occurred: {error}")
    await turn_context.send_activity(
        MessageFactory.text("An error occurred. Please try again later.")
    )
def show_privacy_consent() -> bool:
    """Display a pop-up window to obtain user consent for data collection and privacy."""
    import tkinter as tk

    def on_accept():
        user_consent.set(True)
        root.destroy()

    def on_decline():
        user_consent.set(False)
        root.destroy()

    root = tk.Tk()
    root.title("Data Permission and Privacy")
    message = ("We value your privacy. By using this application, you consent to the collection and use of your data "
               "as described in our privacy policy. Do you agree to proceed?")
    label = tk.Label(root, text=message, wraplength=400, justify="left")
    label.pack(padx=20, pady=20)
    button_frame = tk.Frame(root)
    button_frame.pack(pady=10)
    accept_button = tk.Button(button_frame, text="Accept", command=on_accept)
    accept_button.pack(side="left", padx=10)
    decline_button = tk.Button(button_frame, text="Decline", command=on_decline)
    decline_button.pack(side="right", padx=10)
    user_consent = tk.BooleanVar()
    root.mainloop()
    return user_consent.get()
# Example usage of the MyBot class: the bot is constructed by the hosting
# application with its state, dialog, and reasoning objects (see the sketch below).
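# A minimal wiring sketch, assuming the standard Bot Framework state objects from
# botbuilder.core and that UniversalReasoning accepts the loaded config (its
# constructor signature is an assumption here); the dialog object is app-specific.
#
# from botbuilder.core import MemoryStorage, ConversationState, UserState
#
# storage = MemoryStorage()
# conversation_state = ConversationState(storage)
# user_state = UserState(storage)
# universal_reasoning = UniversalReasoning(
#     load_and_validate_config('config.json', 'config_schema.json')  # assumed constructor argument
# )
# dialog = None  # replace with your application's dialog, if any
# bot = MyBot(conversation_state, user_state, dialog, universal_reasoning)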
# Functions based on JSON configuration
def newton_thoughts(question: str) -> str:
    """Apply Newton's laws to the given question."""
    return apply_newtons_laws(question)

def apply_newtons_laws(question: str) -> str:
    """Apply Newton's laws to the given question."""
    if not question:
        return 'No question to think about.'
    complexity = len(question)
    force = mass_of_thought(question) * acceleration_of_thought(complexity)
    return f'Thought force: {force}'

def mass_of_thought(question: str) -> int:
    """Calculate the mass of thought based on the question length."""
    return len(question)

def acceleration_of_thought(complexity: int) -> float:
    """Calculate the acceleration of thought based on the complexity."""
    return complexity / 2
def davinci_insights(question: str) -> str:
    """Generate insights like Da Vinci for the given question."""
    return think_like_davinci(question)

def think_like_davinci(question: str) -> str:
    """Generate insights like Da Vinci for the given question."""
    perspectives = [
        f"What if we view '{question}' from the perspective of the stars?",
        f"Consider '{question}' as if it's a masterpiece of the universe.",
        f"Reflect on '{question}' through the lens of nature's design."
    ]
    return random.choice(perspectives)

def human_intuition(question: str) -> str:
    """Provide human intuition for the given question."""
    intuition = [
        "How does this question make you feel?",
        "What emotional connection do you have with this topic?",
        "What does your gut instinct tell you about this?"
    ]
    return random.choice(intuition)

def neural_network_thinking(question: str) -> str:
    """Apply neural network thinking to the given question."""
    neural_perspectives = [
        f"Process '{question}' through a multi-layered neural network.",
        f"Apply deep learning to uncover hidden insights about '{question}'.",
        f"Use machine learning to predict patterns in '{question}'."
    ]
    return random.choice(neural_perspectives)

def quantum_computing_thinking(question: str) -> str:
    """Apply quantum computing principles to the given question."""
    quantum_perspectives = [
        f"Consider '{question}' using quantum superposition principles.",
        f"Apply quantum entanglement to find connections in '{question}'.",
        f"Utilize quantum computing to solve '{question}' more efficiently."
    ]
    return random.choice(quantum_perspectives)

def resilient_kindness(question: str) -> str:
    """Provide perspectives of resilient kindness."""
    kindness_perspectives = [
        "Despite losing everything, seeing life as a chance to grow.",
        "Finding strength in kindness after facing life's hardest trials.",
        "Embracing every challenge as an opportunity for growth and compassion."
    ]
    return random.choice(kindness_perspectives)

def identify_and_refute_fallacies(argument: str) -> str:
    """Identify and refute common logical fallacies in the argument."""
    refutations = [
        "This is an ad hominem fallacy. Let's focus on the argument itself rather than attacking the person.",
        "This is a straw man fallacy. The argument is being misrepresented.",
        "This is a false dilemma fallacy. There are more options than presented.",
        "This is a slippery slope fallacy. The conclusion does not necessarily follow from the premise.",
        "This is circular reasoning. The argument's conclusion is used as a premise.",
        "This is a hasty generalization. The conclusion is based on insufficient evidence.",
        "This is a red herring fallacy. The argument is being diverted to an irrelevant topic.",
        "This is a post hoc ergo propter hoc fallacy. Correlation does not imply causation.",
        "This is an appeal to authority fallacy. The argument relies on the opinion of an authority figure.",
        "This is a bandwagon fallacy. The argument assumes something is true because many people believe it.",
        "This is a false equivalence fallacy. The argument equates two things that are not equivalent."
    ]
    return random.choice(refutations)

def universal_reasoning(question: str) -> str:
    """Generate a comprehensive response using various reasoning methods."""
    responses = [
        newton_thoughts(question),
        davinci_insights(question),
        human_intuition(question),
        neural_network_thinking(question),
        quantum_computing_thinking(question),
        resilient_kindness(question),
        identify_and_refute_fallacies(question)
    ]
    return "\n".join(responses)
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages: list, deployment_id: str) -> str:
    """Make a chat completion request to Azure OpenAI."""
    try:
        # Uses the legacy (pre-1.0) openai SDK interface; the Azure settings
        # (api_type, api_base, api_key, api_version) are assumed to be configured
        # elsewhere, e.g. in chat.py.
        import openai
        response = openai.ChatCompletion.create(
            engine=deployment_id,  # Use the deployment name of your Azure OpenAI model
            messages=messages
        )
        return response.choices[0].message.content.strip()
    except openai.error.OpenAIError as e:
        logging.error("Unable to generate ChatCompletion response")
        logging.error(f"Exception: {e}")
        return f"Error: {e}"

def get_internet_answer(question: str, deployment_id: str) -> str:
    """Get an answer using Azure OpenAI's chat completion request."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": question}
    ]
    return chat_completion_request(messages, deployment_id=deployment_id)
def reflect_on_decisions() -> str:
    """Regularly reflect on your decisions and the processes used."""
    reflection_message = (
        "Regularly reflect on your decisions, the processes you used, the information you considered, "
        "and the perspectives you may have missed. Reflection is a cornerstone of learning from experience."
    )
    return reflection_message

def process_questions_from_json(file_path: str):
    """Process questions from a JSON file and call the appropriate functions."""
    with open(file_path, 'r') as file:
        questions_data = json.load(file)
    for question_data in questions_data:
        question = question_data['question']
        print(f"Question: {question}")
        for function_data in question_data['functions']:
            function_name = function_data['name']
            function_description = function_data['description']
            function_parameters = function_data['parameters']
            print(f"Function: {function_name}")
            print(f"Description: {function_description}")
            # Call the function dynamically
            if function_name in globals():
                function = globals()[function_name]
                response = function(**function_parameters)
                print(f"Response: {response}")
            else:
                print(f"Function {function_name} not found.")
if __name__ == "__main__":
if show_privacy_consent():
process_questions_from_json('questions.json')
question = "What is the meaning of life?"
deployment_id = "your-deployment-name" # Replace with your Azure deployment name
print("Newton's Thoughts:", newton_thoughts(question))
print("Da Vinci's Insights:", davinci_insights(question))
print("Human Intuition:", human_intuition(question))
print("Neural Network Thinking:", neural_network_thinking(question))
print("Quantum Computing Thinking:", quantum_computing_thinking(question))
print("Resilient Kindness:", resilient_kindness(question))
print("Universal Reasoning:", universal_reasoning(question))
print("Internet Answer:", get_internet_answer(question, deployment_id))
else:
print("User did not consent to data collection. Exiting application.")
print(reflect_on_decisions())