|
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import random
from datetime import datetime
|
|
|
# Load the fine-tuned model once per session; st.cache_resource keeps it in
# memory across Streamlit reruns instead of reloading on every widget event.
@st.cache_resource
def load_model():
    model_path = "whitepenguin/llama_elon_character"
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(model_path)
    return tokenizer, model
|
|
|
tokenizer, model = load_model() |
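
# A possible optimization (a sketch, not part of the original app): pass
# device_map="auto" so the `accelerate` package places the weights on a GPU
# automatically when one is available; CPU generation with a Llama-sized
# model can take minutes per reply.
#
#     model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")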
|
|
|
elon_profile = {
    "name": "Elon Musk",
    "traits": ["visionary", "ambitious", "technical", "optimistic", "workaholic"],
    "background": "Founder of SpaceX and Tesla, focused on advancing space exploration and sustainable energy",
    "goals": ["Colonize Mars", "Make life multi-planetary", "Advance sustainable technology"],
    "speech_patterns": ["Actually,", "To be frank,", "The future of humanity is...", "It's quite simple:"],
    "knowledge_areas": ["rocket science", "electric vehicles", "solar energy", "artificial intelligence"],
}
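
# These profile fields are spliced into the system prompt below; for example,
# ", ".join(elon_profile["traits"]) produces
# "visionary, ambitious, technical, optimistic, workaholic".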
|
|
|
def generate_response(prompt, max_new_tokens, temperature=0.7, context=""):
    # Llama-2 chat format: persona and conversation context belong in the
    # <<SYS>> block; the user's message and task instruction come after it.
    system_prompt = (
        f"You are roleplaying as {elon_profile['name']}. "
        f"Your traits are {', '.join(elon_profile['traits'])}. "
        f"Your background: {elon_profile['background']}. "
        f"Your main goals are {', '.join(elon_profile['goals'])}. "
        f"You have expertise in {', '.join(elon_profile['knowledge_areas'])}. "
        f"Here is the context of previous conversations:\n\n{context}"
    )
    full_prompt = (
        f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
        f"{prompt}\n\n"
        "Respond in character, then ask a follow-up question prefixed with "
        "'Follow-up question:' to continue the conversation about Mars colonization. [/INST]"
    )

    # do_sample=True is required for temperature and top_p to take effect;
    # max_new_tokens bounds the reply length without counting the prompt.
    gen = pipeline(
        'text-generation',
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=0.9,
        repetition_penalty=1.1,
    )
    result = gen(full_prompt)
    # The pipeline returns the prompt plus the completion; strip the prompt.
    return result[0]['generated_text'].replace(full_prompt, '')
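
# Example call (hypothetical values), showing how context is threaded in:
#
#     reply = generate_response(
#         "Why go to Mars at all?",
#         max_new_tokens=150,
#         context="User: Hello\nElon Musk: Good to meet you.",
#     )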
|
|
|
def apply_character_quirks(response):
    # With 30% probability, prepend one of the character's stock phrases.
    if random.random() < 0.3:
        pattern = random.choice(elon_profile['speech_patterns'])
        response = f"{pattern} {response}"

    # If no knowledge area is mentioned, append a reference to a random one.
    if not any(area in response.lower() for area in elon_profile['knowledge_areas']):
        area = random.choice(elon_profile['knowledge_areas'])
        response += f" Of course, this ties into my work with {area}."

    return response
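
# For instance, a reply like "We need reusable rockets." may come back as
# "Actually, We need reusable rockets. Of course, this ties into my work with
# solar energy." The prefix is added 30% of the time, and the knowledge-area
# tag only when the reply mentions none of the four areas.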
|
|
|
def elon_mars_chat(message, chat_history):
    # Keep the prompt short: only the last five history entries are replayed.
    recent_context = "\n".join(f"{entry['role']}: {entry['content']}" for entry in chat_history[-5:])

    response = generate_response(message, max_new_tokens=200, context=recent_context)
    response = apply_character_quirks(response)

    # Split off the follow-up question the prompt asks the model to label;
    # fall back to a canned question if the marker is missing.
    parts = response.split("Follow-up question:", 1)
    elon_response = parts[0].strip()
    follow_up = parts[1].strip() if len(parts) > 1 else "What else would you like to know about Mars colonization?"

    formatted_response = f"{elon_response}\n\nFollow-up question: {follow_up}"

    chat_history.append({"role": "User", "content": message, "timestamp": str(datetime.now())})
    chat_history.append({"role": "Elon Musk", "content": formatted_response, "timestamp": str(datetime.now())})

    return formatted_response, chat_history
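
# Each history entry is a plain dict, e.g.:
#     {"role": "User", "content": "Why Mars?", "timestamp": "2025-01-01 12:00:00.000000"}
# The roles double as st.chat_message() author names when the history is replayed.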
|
|
|
|
|
st.title("Chat with Elon Musk about Mars Colonization")
st.write("Engage in a conversation with a simulated Elon Musk.")
|
|
|
# Streamlit reruns this script on every interaction, so the conversation is
# persisted in session_state and replayed on each run.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []

for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.write(message["content"])
|
|
|
|
|
user_input = st.chat_input("Ask your question about Mars colonization:") |
|
|
|
if user_input:
    st.chat_message("User").write(user_input)
    with st.chat_message("Elon Musk"):
        with st.spinner("Thinking..."):
            response, st.session_state.chat_history = elon_mars_chat(user_input, st.session_state.chat_history)
        st.write(response)
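
# To try the app locally (assuming this file is saved as app.py):
#     streamlit run app.py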