# Hugging Face Space Adaptation for Autistic Assistant 2024 Ultra

# Install necessary libraries (if running locally)
# !pip install transformers torch textblob gradio

# Import necessary libraries
import pickle

import torch
import gradio as gr
from textblob import TextBlob
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# ---- Constants and Setup ----
model_name = 'gpt2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.eval()

# GPT-2 ships without a pad token; reuse the EOS token so padding works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

tokenizer.clean_up_tokenization_spaces = True

# Ensure model and tensors are moved to the GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# ---- Memory Management ----
def save_memory(memory, filename='chat_memory.pkl'):
    # Persist the session memory to disk so it survives restarts
    with open(filename, 'wb') as f:
        pickle.dump(memory, f)

def load_memory(filename='chat_memory.pkl'):
    # Load previously saved session memory, or start fresh if none exists
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        return []

session_memory = load_memory()

# ---- Sentiment Analysis ----
def analyze_sentiment(text):
    # TextBlob polarity ranges from -1 (negative) to 1 (positive)
    blob = TextBlob(text)
    return blob.sentiment.polarity

def adjust_for_emotion(response, sentiment):
    # Wrap the generated response in an empathetic frame based on sentiment
    if sentiment > 0.2:
        return f"That's wonderful! I'm glad you're feeling good: {response}"
    elif sentiment < -0.2:
        return f"I'm truly sorry to hear that: {response}. How can I help?"
    return response

# ---- Response Generation ----
def generate_response(prompt, max_length=1024):
    inputs = tokenizer(prompt, return_tensors='pt', padding=True,
                       truncation=True, max_length=max_length)
    input_ids = inputs['input_ids'].to(device)
    attention_mask = inputs['attention_mask'].to(device)

    # Note: max_length bounds prompt and continuation combined
    with torch.no_grad():
        output = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_length=max_length,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.9,
            top_k=50,
            top_p=0.95,
            pad_token_id=tokenizer.pad_token_id,
        )
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response.strip()

# ---- Interactive Chat Function ----
def advanced_agi_chat(user_input):
    # Add user input to session memory and persist it
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Sentiment analysis of the user's message
    user_sentiment = analyze_sentiment(user_input)

    # Generate the response
    prompt = f"User: {user_input}\nAutistic-Gertrude:"
    response = generate_response(prompt)

    # Adjust response for emotional alignment
    return adjust_for_emotion(response, user_sentiment)

# ---- Gradio Interface ----
def chat_interface(user_input):
    return advanced_agi_chat(user_input)

with gr.Blocks() as app:
    gr.Markdown("# Autistic Assistant vß Edition 2024 Gertrude")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(
                label="What will you say to Gertrude?",
                placeholder="Type something here... Response time is around 125 seconds...",
            )
            submit_button = gr.Button("Send")
        with gr.Column():
            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

# Launch the Gradio app
app.launch()
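
# ---- Optional: quick smoke test (a minimal sketch, not part of the original app) ----
# To sanity-check the pipeline without the Gradio UI, comment out
# `app.launch()` above and uncomment the loop below. The two example
# prompts are illustrative assumptions, chosen to exercise both the
# positive and negative branches of adjust_for_emotion.
#
# for test_input in ["I had a wonderful day today!", "I'm feeling really down."]:
#     print(f"User: {test_input}")
#     print(f"Gertrude: {advanced_agi_chat(test_input)}\n")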