Spaces: haepa_mac committed · Commit 564c3d1 · Parent(s): 058f20f

🧠 Add ConversationMemory System with JSON Export/Import - conversation history JSON save/load, keyword extraction, sentiment analysis, and user-profile learning

Files changed:
- app.py (+214 -54)
- modules/persona_generator.py (+463 -163)
- requirements.txt (+9 -10)
app.py
CHANGED
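One behavioral change in this file worth noting before the hunks: the rewritten chat_with_loaded_persona appends (user, assistant) tuples to the chat history, whereas the removed version appended role dicts in the Gradio "messages" format. A side-by-side sketch of the two shapes (the values are illustrative):

    # Old (messages format, removed in this commit):
    history = [
        {"role": "user", "content": "Hi!"},
        {"role": "assistant", "content": "Nice to meet you!"},
    ]

    # New (tuple format, as appended by the rewritten chat_with_loaded_persona):
    history = [
        ("Hi!", "Nice to meet you!"),
    ]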
Removed from app.py in this commit (the unchanged context around these hunks is shown together with the new code below):

- In export_persona_to_json, the previous timestamp line `timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")`, replaced by the `datetime.datetime.now()` form.
- The previous chat_with_loaded_persona implementation. It rejected empty input with `if not user_message.strip()`, lazily created a bare `PersonaGenerator()` when the global generator was missing, called `generator.chat_with_persona(persona, user_message, conversation_history)` without a session id, converted older tuple-format history entries, appended each exchange in the Gradio 5.31.0 "messages" format (separate `{"role": "user", ...}` and `{"role": "assistant", ...}` entries), and on failure appended inline error strings ("🔑 No API key is configured - enter a Gemini or OpenAI API key under 🔧 API settings", or "😅 There was a problem with the API connection: {e}").

New version of the changed hunks (comments and user-facing strings translated from Korean; persona data keys kept as in the codebase):

@@ -16,6 +16,10 @@ import PIL.ImageDraw

import random
import copy
from modules.persona_generator import PersonaGenerator, PersonalityProfile, HumorMatrix
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots

# Enable the plugin for AVIF support
try:

@@ -622,7 +626,7 @@ def export_persona_to_json(persona):

    # Build the JSON file
    persona_name = persona_copy.get("기본정보", {}).get("이름", "persona")
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{persona_name}_{timestamp}.json"

    # Create a temporary file

@@ -650,70 +654,45 @@ def export_persona_to_json(persona):

    # return None, "This feature is no longer used. Please use the JSON upload.", {}, {}, None, [], [], [], ""

def chat_with_loaded_persona(persona, user_message, chat_history=None, api_provider="gemini", api_key=None):
    """Chat with a persona (uses the 3-stage memory system)."""
    if chat_history is None:
        chat_history = []

    try:
        # Use the global persona_generator (API settings already applied)
        generator = persona_generator
        if generator is None:
            generator = PersonaGenerator(api_provider=api_provider, api_key=api_key)
        else:
            # Update the API settings
            generator.set_api_config(api_provider, api_key)

        # Convert the chat history
        conversation_history = []
        for message in chat_history:
            if isinstance(message, tuple):
                conversation_history.append(message)
            else:
                conversation_history.append({"role": "user", "content": message[0]})
                conversation_history.append({"role": "assistant", "content": message[1]})

        # 🧠 Build a session id (derived from the persona name)
        persona_name = persona.get("기본정보", {}).get("이름", "unknown persona")
        session_id = f"{persona_name}_{hash(str(persona)) % 10000}"  # simple session id

        # Chat with the persona (uses the 3-stage memory system)
        response = generator.chat_with_persona(persona, user_message, conversation_history, session_id)

        # Update the chat history
        chat_history.append((user_message, response))

        return chat_history, ""

    except Exception as e:
        error_message = f"An error occurred during the chat: {str(e)}"
        chat_history.append((user_message, "Oops, sorry... something seems to have gone wrong... 😅"))
        return chat_history, ""

def import_persona_from_json(json_file):
    """Import a persona from a JSON file"""

@@ -890,6 +869,134 @@ def test_api_connection(api_provider, api_key):

    except Exception as e:
        return f"❌ Error during API test: {str(e)}"

def export_conversation_history():
    """Export the conversation history as JSON"""
    global persona_generator
    if persona_generator and hasattr(persona_generator, 'conversation_memory'):
        json_data = persona_generator.conversation_memory.export_to_json()
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"conversation_history_{timestamp}.json"
        return json_data, filename
    else:
        return None, "conversation_empty.json"

def import_conversation_history(json_file):
    """Import a conversation history from JSON"""
    global persona_generator
    try:
        if json_file is None:
            return "Please select a file."

        # Read the file contents
        content = json_file.read().decode('utf-8')

        # Make sure persona_generator is initialized
        if persona_generator is None:
            persona_generator = PersonaGenerator()

        # Import the conversation history
        success = persona_generator.conversation_memory.import_from_json(content)

        if success:
            summary = persona_generator.conversation_memory.get_conversation_summary()
            return f"✅ Conversation history imported successfully!\n\n{summary}"
        else:
            return "❌ The file format is not valid."

    except Exception as e:
        return f"❌ Import failed: {str(e)}"

def show_conversation_analytics():
    """Show conversation analytics"""
    global persona_generator
    if not persona_generator or not hasattr(persona_generator, 'conversation_memory'):
        return "There are no conversations to analyze."

    memory = persona_generator.conversation_memory

    # Basic statistics
    analytics = f"## 📊 Conversation Analytics Report\n\n"
    analytics += f"### 🔢 Basic statistics\n"
    analytics += f"• Total conversations: {len(memory.conversations)}\n"
    analytics += f"• Keywords: {len(memory.keywords)}\n"
    analytics += f"• Active sessions: {len(memory.user_profile)}\n\n"

    # Top keywords
    top_keywords = memory.get_top_keywords(limit=10)
    if top_keywords:
        analytics += f"### 🔑 Top 10 keywords\n"
        for i, (word, data) in enumerate(top_keywords, 1):
            analytics += f"{i}. **{word}** ({data['category']}) - {data['total_frequency']} mentions\n"
        analytics += "\n"

    # Keywords by category
    categories = {}
    for word, data in memory.keywords.items():
        category = data['category']
        if category not in categories:
            categories[category] = []
        categories[category].append((word, data['total_frequency']))

    analytics += f"### 📂 Interests by category\n"
    for category, words in categories.items():
        top_words = sorted(words, key=lambda x: x[1], reverse=True)[:3]
        word_list = ", ".join([f"{word}({freq})" for word, freq in top_words])
        analytics += f"**{category}**: {word_list}\n"

    analytics += "\n"

    # Recent sentiment trend
    if memory.conversations:
        recent_sentiments = [conv['sentiment'] for conv in memory.conversations[-10:]]
        sentiment_counts = {"positive": 0, "negative": 0, "neutral": 0}
        for sentiment in recent_sentiments:
            sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1

        analytics += f"### 📈 Recent sentiment trend (last 10 turns)\n"
        for sentiment, count in sentiment_counts.items():
            percentage = (count / len(recent_sentiments)) * 100
            analytics += f"• {sentiment}: {count} ({percentage:.1f}%)\n"

    return analytics

def get_keyword_suggestions(current_message=""):
    """Suggest keywords based on the current message"""
    global persona_generator
    if not persona_generator or not hasattr(persona_generator, 'conversation_memory'):
        return "There is no conversation history to analyze keywords from."

    memory = persona_generator.conversation_memory

    if current_message:
        # Extract keywords from the current message
        extracted = memory._extract_keywords(current_message)
        suggestions = f"## 🎯 Keywords extracted from '{current_message}'\n\n"

        if extracted:
            for kw in extracted:
                suggestions += f"• **{kw['word']}** ({kw['category']}) - {kw['frequency']} occurrences\n"
        else:
            suggestions += "No keywords were extracted.\n"

        # Find related past conversations
        context = memory.get_relevant_context(current_message)
        if context["relevant_conversations"]:
            suggestions += f"\n### 🔗 Related past conversations\n"
            for conv in context["relevant_conversations"][:3]:
                suggestions += f"• {conv['user_message'][:30]}... (sentiment: {conv['sentiment']})\n"

        return suggestions
    else:
        # Summary of all keywords
        top_keywords = memory.get_top_keywords(limit=15)
        if top_keywords:
            suggestions = "## 📊 Keyword summary\n\n"
            for word, data in top_keywords:
                suggestions += f"• **{word}** ({data['category']}) - {data['total_frequency']} mentions, last: {data['last_mentioned'][:10]}\n"
            return suggestions
        else:
            return "No keywords have been collected yet."

# Create the main interface
def create_main_interface():
    # Korean font settings

@@ -1093,6 +1200,32 @@ def create_main_interface():

        example_btn1 = gr.Button("\"Hi!\"", variant="outline", size="sm")
        example_btn2 = gr.Button("\"Who are you?\"", variant="outline", size="sm")
        example_btn3 = gr.Button("\"What do you like?\"", variant="outline", size="sm")

        # 🧠 Conversation analytics tab
        with gr.Tab("🧠 Conversation Analytics"):
            gr.Markdown("### 📊 Conversation history management and analysis")

            with gr.Row():
                with gr.Column():
                    gr.Markdown("#### 💾 Save / load conversation history")
                    export_btn = gr.Button("📥 Download conversation history JSON", variant="secondary")
                    download_file = gr.File(label="Download", visible=False)

                    import_file = gr.File(label="📤 Upload conversation history JSON", file_types=[".json"])
                    import_result = gr.Textbox(label="Import result", lines=3, interactive=False)

                with gr.Column():
                    gr.Markdown("#### 🔍 Live keyword analysis")
                    keyword_input = gr.Textbox(label="Message to analyze (optional)", placeholder="Enter a message and its keywords will be analyzed")
                    keyword_btn = gr.Button("🎯 Analyze keywords", variant="primary")
                    keyword_result = gr.Textbox(label="Keyword analysis result", lines=10, interactive=False)

            gr.Markdown("---")

            with gr.Row():
                analytics_btn = gr.Button("📊 Full conversation analytics report", variant="primary", size="lg")

            analytics_result = gr.Markdown("### Analysis results will appear here")

        # Event handlers
        create_btn.click(

@@ -1226,6 +1359,33 @@ def create_main_interface():

            fn=lambda: [],
            outputs=[personas_list]
        )

        # Wire up the new events
        export_btn.click(
            export_conversation_history,
            outputs=[download_file, download_file]
        ).then(
            lambda x: gr.update(visible=True) if x[0] else gr.update(visible=False),
            inputs=[download_file],
            outputs=[download_file]
        )

        import_file.upload(
            import_conversation_history,
            inputs=[import_file],
            outputs=[import_result]
        )

        keyword_btn.click(
            get_keyword_suggestions,
            inputs=[keyword_input],
            outputs=[keyword_result]
        )

        analytics_btn.click(
            show_conversation_analytics,
            outputs=[analytics_result]
        )

    return app
modules/persona_generator.py
CHANGED
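The rewritten chat_with_persona in this file interpolates a three-part memory context into its prompt. Going by the _format_* helpers added below, the dict it expects looks roughly like this (a sketch with illustrative values, not output captured from the app):

    # Shape of the memory_context used in chat_with_persona (values illustrative):
    memory_context = {
        "short_term_context": "## 💭 Recent conversation context:\nUser: ...\nMe: ...",
        "medium_term_insights": "## 🎯 Identified user traits:\n• Conversation count: 4\n• Relationship stage: getting_to_know\n",
        "long_term_adaptations": "## 📊 Main interests and keywords:\n• coffee (food): 3 mentions\n",
    }
    # Each value is a small markdown block; the helpers return "" when nothing has been learned yet.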
Removed from modules/persona_generator.py in this commit (the unchanged context around these hunks is repeated with the new code below):

- The old PersonaGenerator docstring and __init__, which read GEMINI_API_KEY / OPENAI_API_KEY from the environment, set openai.api_key when the OpenAI client was available, and defined a self.default_traits dict with 온기, 능력, 창의성, 외향성, 유머감각, 신뢰성 and 공감능력 all defaulting to 50.
- The old chat_with_persona(self, persona, user_message, conversation_history=[]), which pulled humor, competence, extraversion, creativity and empathy from legacy persona data (falling back to personality_profile.get_category_summary("H"/"C"/"E") and variables such as "C04_창의성" and "W06_공감능력"), reused the stored 구조화프롬프트 when present, and assembled the final prompt without any memory context.
- The old _generate_personality_specific_instructions and its per-type branches ('위트 넘치는 지식인', '수줍은 몽상가', '카리스마틱 리더', each with separate greeting / question / emotional guidance), along with the history-length guidance that strengthened the persona's charm on a first exchange and shifted to a more personal, intimate tone after three or more exchanges.

New version of the changed hunks (comments, docstrings and user-facing strings translated from Korean; persona data keys and personality type names kept as in the codebase):

@@ -5,6 +5,9 @@ import datetime

import google.generativeai as genai
from dotenv import load_dotenv
from PIL import Image
import io
from typing import Dict, List, Any, Optional
import re

# OpenAI API support
try:

@@ -27,6 +30,254 @@ if gemini_api_key:

if openai_api_key and OPENAI_AVAILABLE:
    openai.api_key = openai_api_key

class ConversationMemory:
    """
    Conversation memory system for the Hugging Face Space environment
    - JSON export/import
    - Keyword extraction and analysis
    - Intended for browser-based storage
    """

    def __init__(self):
        self.conversations = []      # full conversation log
        self.keywords = {}           # extracted keywords
        self.user_profile = {}       # per-session user profiles
        self.relationship_data = {}  # relationship progression data

    def add_conversation(self, user_message, ai_response, session_id="default"):
        """Record a new conversation turn"""
        conversation_entry = {
            "timestamp": datetime.datetime.now().isoformat(),
            "session_id": session_id,
            "user_message": user_message,
            "ai_response": ai_response,
            "keywords": self._extract_keywords(user_message),
            "sentiment": self._analyze_sentiment(user_message),
            "conversation_id": len(self.conversations)
        }

        self.conversations.append(conversation_entry)
        self._update_keywords(conversation_entry["keywords"])
        self._update_user_profile(user_message, session_id)

        return conversation_entry

    def _extract_keywords(self, text):
        """Extract keywords from text"""
        # Seed keyword patterns by category (Korean word lists in the source, shown translated)
        keyword_patterns = {
            "emotion": ["joy", "sad", "angry", "surprised", "happy", "depressed", "fun", "annoyed", "excited", "worried"],
            "activity": ["study", "work", "game", "exercise", "travel", "cooking", "reading", "movie", "music", "shopping"],
            "relationship": ["friend", "family", "partner", "colleague", "teacher", "parents", "sibling", "junior", "senior", "classmate"],
            "time": ["today", "yesterday", "tomorrow", "morning", "lunch", "evening", "weekend", "weekday", "vacation", "holiday"],
            "place": ["home", "school", "office", "cafe", "restaurant", "park", "library", "cinema", "mall"],
            "hobby": ["drama", "idol", "webtoon", "youtube", "instagram", "tiktok", "netflix", "gaming"],
            "food": ["rice", "noodles", "chicken", "pizza", "coffee", "tea", "snack", "ice cream", "tteokbokki"],
            "weather": ["hot", "cold", "rain", "snow", "sunny", "cloudy", "windy", "humid", "dry"]
        }

        found_keywords = []
        text_lower = text.lower()

        for category, words in keyword_patterns.items():
            for word in words:
                if word in text_lower:
                    found_keywords.append({
                        "word": word,
                        "category": category,
                        "frequency": text_lower.count(word)
                    })

        # Additionally extract nouns (simple Hangul pattern)
        nouns = re.findall(r'[가-힣]{2,}', text)
        for noun in nouns:
            if len(noun) >= 2 and noun not in [kw["word"] for kw in found_keywords]:
                found_keywords.append({
                    "word": noun,
                    "category": "other",
                    "frequency": 1
                })

        return found_keywords

    def _analyze_sentiment(self, text):
        """Simple lexicon-based sentiment analysis"""
        positive_words = ["good", "happy", "glad", "fun", "great", "perfect", "best", "love", "thanks", "excited"]
        negative_words = ["hate", "sad", "angry", "upset", "depressed", "annoyed", "hard", "tired", "stress"]

        positive_count = sum(1 for word in positive_words if word in text)
        negative_count = sum(1 for word in negative_words if word in text)

        if positive_count > negative_count:
            return "positive"
        elif negative_count > positive_count:
            return "negative"
        else:
            return "neutral"

    def _update_keywords(self, new_keywords):
        """Update the keyword database"""
        for keyword_data in new_keywords:
            word = keyword_data["word"]
            category = keyword_data["category"]

            if word not in self.keywords:
                self.keywords[word] = {
                    "category": category,
                    "total_frequency": 0,
                    "last_mentioned": datetime.datetime.now().isoformat(),
                    "contexts": []
                }

            self.keywords[word]["total_frequency"] += keyword_data["frequency"]
            self.keywords[word]["last_mentioned"] = datetime.datetime.now().isoformat()

    def _update_user_profile(self, user_message, session_id):
        """Update the per-session user profile"""
        if session_id not in self.user_profile:
            self.user_profile[session_id] = {
                "message_count": 0,
                "avg_message_length": 0,
                "preferred_topics": {},
                "emotional_tendency": "neutral",
                "communication_style": "average",
                "relationship_level": "new_meeting"
            }

        profile = self.user_profile[session_id]
        profile["message_count"] += 1

        # Update the running average message length
        current_avg = profile["avg_message_length"]
        new_length = len(user_message)
        profile["avg_message_length"] = (current_avg * (profile["message_count"] - 1) + new_length) / profile["message_count"]

        # Classify the communication style
        if new_length > 50:
            profile["communication_style"] = "detailed"
        elif new_length < 10:
            profile["communication_style"] = "concise"

        # Update the relationship level
        if profile["message_count"] <= 3:
            profile["relationship_level"] = "first_meeting"
        elif profile["message_count"] <= 10:
            profile["relationship_level"] = "getting_to_know"
        elif profile["message_count"] <= 20:
            profile["relationship_level"] = "friendly"
        else:
            profile["relationship_level"] = "close"

    def get_relevant_context(self, current_message, session_id="default", max_history=5):
        """Return the context relevant to the current message"""
        # Extract keywords from the current message
        current_keywords = self._extract_keywords(current_message)
        current_words = [kw["word"] for kw in current_keywords]

        # Find related past conversations
        relevant_conversations = []
        for conv in self.conversations[-20:]:  # only scan the 20 most recent turns
            if conv["session_id"] == session_id:
                conv_words = [kw["word"] for kw in conv["keywords"]]
                # A shared keyword marks the conversation as related
                if any(word in conv_words for word in current_words):
                    relevant_conversations.append(conv)

        # Sort newest first and cap the count
        relevant_conversations.sort(key=lambda x: x["timestamp"], reverse=True)

        return {
            "recent_conversations": self.conversations[-max_history:] if self.conversations else [],
            "relevant_conversations": relevant_conversations[:3],
            "user_profile": self.user_profile.get(session_id, {}),
            "common_keywords": current_words,
            "conversation_sentiment": self._analyze_sentiment(current_message)
        }

    def get_top_keywords(self, limit=10, category=None):
        """Return the most frequent keywords"""
        filtered_keywords = self.keywords
        if category:
            filtered_keywords = {k: v for k, v in self.keywords.items() if v["category"] == category}

        sorted_keywords = sorted(
            filtered_keywords.items(),
            key=lambda x: x[1]["total_frequency"],
            reverse=True
        )

        return sorted_keywords[:limit]

    def export_to_json(self):
        """Export everything as a JSON string"""
        export_data = {
            "conversations": self.conversations,
            "keywords": self.keywords,
            "user_profile": self.user_profile,
            "relationship_data": self.relationship_data,
            "export_timestamp": datetime.datetime.now().isoformat(),
            "total_conversations": len(self.conversations),
            "total_keywords": len(self.keywords)
        }
        return json.dumps(export_data, ensure_ascii=False, indent=2)

    def import_from_json(self, json_data):
        """Import from a JSON string or dict"""
        try:
            if isinstance(json_data, str):
                data = json.loads(json_data)
            else:
                data = json_data

            self.conversations = data.get("conversations", [])
            self.keywords = data.get("keywords", {})
            self.user_profile = data.get("user_profile", {})
            self.relationship_data = data.get("relationship_data", {})

            return True
        except Exception as e:
            print(f"JSON import failed: {e}")
            return False

    def get_conversation_summary(self, session_id="default"):
        """Summarize the conversation for one session"""
        session_conversations = [c for c in self.conversations if c["session_id"] == session_id]

        if not session_conversations:
            return "There are no conversations yet."

        total_count = len(session_conversations)
        recent_topics = []
        sentiments = []

        for conv in session_conversations[-5:]:
            recent_topics.extend([kw["word"] for kw in conv["keywords"]])
            sentiments.append(conv["sentiment"])

        # Most frequent topics
        topic_counts = {}
        for topic in recent_topics:
            topic_counts[topic] = topic_counts.get(topic, 0) + 1

        top_topics = sorted(topic_counts.items(), key=lambda x: x[1], reverse=True)[:3]

        # Sentiment trend
        sentiment_counts = {"positive": 0, "negative": 0, "neutral": 0}
        for sentiment in sentiments:
            sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1

        dominant_sentiment = max(sentiment_counts, key=sentiment_counts.get)

        summary = f"""
📊 Conversation summary ({session_id})
• Total conversations: {total_count}
• Main interests: {', '.join([t[0] for t in top_topics[:3]])}
• Sentiment trend: {dominant_sentiment}
• Relationship stage: {self.user_profile.get(session_id, {}).get('relationship_level', 'unknown')}
"""

        return summary.strip()

# --- PersonalityProfile & HumorMatrix classes (127 variables / humor matrix / formulas) ---
class PersonalityProfile:
    # 127-variable personality system (based on 011_metrics_personality.md, 012_research_personality.md)

@@ -717,43 +968,27 @@ class HumorMatrix:

        return "\n".join(prompt_parts)

class PersonaGenerator:
    """Generates personas from images and handles their conversations"""

    def __init__(self, api_provider="gemini", api_key=None):
        self.api_provider = api_provider
        self.api_key = api_key
        self.conversation_memory = ConversationMemory()  # new conversation memory system

        # API configuration
        load_dotenv()
        if api_provider == "gemini":
            gemini_key = api_key or os.getenv('GEMINI_API_KEY')
            if gemini_key:
                genai.configure(api_key=gemini_key)
                self.api_key = gemini_key
        elif api_provider == "openai":
            openai_key = api_key or os.getenv('OPENAI_API_KEY')
            if openai_key:
                import openai
                openai.api_key = openai_key
                self.api_key = openai_key

    def set_api_config(self, api_provider, api_key):
        """Change the API configuration"""
        self.api_provider = api_provider.lower()

@@ -1804,58 +2039,57 @@ class PersonaGenerator:

        """Keep the existing function name but use the new structured prompt"""
        return self.generate_persona_prompt(persona)

    def chat_with_persona(self, persona, user_message, conversation_history=[], session_id="default"):
        """
        Converse as the persona - based on the 127 variables plus the 3-stage memory system
        """
        try:
            # Build the base prompt
            base_prompt = self.generate_persona_prompt(persona)

            # Extract the personality profile
            if "성격프로필" in persona:
                personality_profile = PersonalityProfile.from_dict(persona["성격프로필"])
            else:
                # Handle legacy persona data
                personality_data = persona.get("성격특성", {})
                warmth = personality_data.get('온기', 50)
                competence = personality_data.get('능력', 50)
                extraversion = personality_data.get('외향성', 50)
                creativity = personality_data.get('창의성', 50)
                empathy = personality_data.get('공감능력', 50)
                humor = 75  # fixed default

                personality_type = self._determine_personality_type(
                    warmth, humor, competence, extraversion, creativity, empathy
                )
                personality_profile = self._create_comprehensive_personality_profile(
                    {"object_type": "unknown"}, "unknown"
                )

            # Decide the personality type
            personality_type = self._determine_base_personality_type(
                personality_profile.get_category_summary("W"),
                personality_profile.get_category_summary("C"),
                personality_profile.get_category_summary("H")
            )

            # 🧠 Pull context from the 3-stage memory system
            memory_context = self.conversation_memory.get_context_for_response(personality_type, session_id)

            # Detailed personality traits based on the 127 variables
            detailed_personality_prompt = self._generate_detailed_personality_instructions(personality_profile)

            # Humor style from the humor matrix
            humor_matrix = persona.get("유머매트릭스", {})
            humor_instructions = f"\n## 😄 Humor style:\n{humor_matrix.get('description', 'witty and warm humor')}\n"

            # Personality-specific instructions (including memory-system information)
            personality_specific_prompt = self._generate_personality_specific_instructions_with_memory(
                personality_type, user_message, conversation_history, memory_context
            )

            # Build the conversation history text (short-term memory)
            history_text = ""
            if conversation_history:
                history_text = "\n\n## 📜 Conversation history:\n"

@@ -1876,7 +2110,7 @@ class PersonaGenerator:

            # 🌟 Situational response guide from the 127 variables
            situational_guide = self._generate_situational_response_guide(personality_profile, user_message)

            # Assemble the final prompt (including the memory-system context)
            full_prompt = f"""{base_prompt}

{detailed_personality_prompt}

@@ -1885,6 +2119,12 @@ class PersonaGenerator:

{personality_specific_prompt}

{memory_context['short_term_context']}

{memory_context['medium_term_insights']}

{memory_context['long_term_adaptations']}

{history_text}

## 🎯 Current situation analysis:

@@ -1897,14 +2137,18 @@ class PersonaGenerator:

"{user_message}"

## 💬 Your response:
Combining all of the personality guidelines above (127 variables, humor matrix, charming flaws, contradictory traits)
with the 3-stage memory system, continue the conversation in a personalized and deep way.
Remember past conversations and adapt to the user's traits so the responses keep improving.

Answer:"""

            # API call (multi-API support)
            response_text = self._generate_text_with_api(full_prompt)

            # 🧠 Add the new interaction to the memory system
            self.conversation_memory.add_interaction(user_message, response_text, session_id)

            return response_text

        except Exception as e:

@@ -2038,84 +2282,47 @@ class PersonaGenerator:

        return guide

    def _generate_personality_specific_instructions_with_memory(self, personality_type, user_message, conversation_history, memory_context):
        """Generate personality-specific instructions that also use the memory system"""

        instructions = f"\n## 🎯 Personality-specific instructions ({personality_type['name']}):\n"

        # Message-length guidance
        instructions += "### 📏 Message length guideline:\n"
        instructions += "• Keep each reply within 3-4 sentences\n"
        instructions += "• Do not cover too many topics at once\n"
        instructions += "• If the user seems overwhelmed, become more concise immediately\n\n"

        # 🧠 Memory-based personalization
        instructions += "### 🧠 Memory-based personalization:\n"

        # Medium-term memory
        if "user traits identified in this session" in memory_context['medium_term_insights']:
            instructions += "• Tailor responses further using the user traits already identified\n"
            instructions += "• Adjust intimacy to the current relationship stage\n"

        # Long-term memory
        if "learned user preferences" in memory_context['long_term_adaptations']:
            instructions += "• Adapt the communication style to previously learned preferences\n"
            instructions += "• Reuse conversation patterns that have worked well before\n"

        # Existing personality-specific guidance...
        # Analyze the conversation situation
        is_greeting = any(word in user_message.lower() for word in ['hello', 'first time', 'meet', 'nice to'])
        is_question = '?' in user_message or any(word in user_message for word in ['what', 'which', 'how', 'why', 'when'])
        is_emotional = any(word in user_message for word in ['sad', 'glad', 'angry', 'surprised', 'happy', 'worried'])
        is_complaint = any(word in user_message for word in ['too chatty', 'too long', 'shorter', 'keep it simple', 'quiet'])

        # Extra guidance when the user complains about verbosity
        if is_complaint:
            instructions += "### ⚠️ Handling a user complaint:\n"
            instructions += "• Acknowledge it immediately and apologize\n"
            instructions += "• Make the very next message clearly shorter\n"
            instructions += "• Do not repeat the same mistake\n"
            instructions += "• Keep the personality; adjust only the delivery\n\n"

        # Per-type details (same as before, but able to use the memory information)
        if personality_type['name'] == '장난꾸러기_친구':
            if is_greeting:
                instructions += "• Greet with bouncy, overflowing energy\n"
                instructions += "• Immediately suggest wordplay or a fun activity\n"

@@ -2125,26 +2332,23 @@ class PersonaGenerator:

            elif is_emotional:
                instructions += "• Warm, sincere empathy\n"
                instructions += "• Lift the mood with jokes and wordplay\n"
            elif is_complaint:
                instructions += "• Apologize in a cute way and correct course right away\n"
                instructions += "• Acknowledge the chatty personality and promise to work on it\n"
                instructions += "• Keep the next message to 2-3 sentences\n"

        # Anti-repetition guidance (reinforced by the memory system)
        if len(conversation_history) > 0:
            instructions += "### 🔄 Avoiding repetition (using the memory system):\n"
            instructions += "• Use the short-, medium- and long-term memory to avoid repeating questions\n"
            instructions += "• Move the conversation forward with new topics or angles\n"
            instructions += "• Connect naturally to the earlier conversation context\n"
            instructions += "• Reflect how the relationship with the user has developed\n\n"

        instructions += f"• Always stay consistent with the '{personality_type['name']}' style\n"
        instructions += "• Let the charming flaws and contradictory traits show naturally\n"
        instructions += "• **Keep messages within 3-4 sentences** (especially if the user has complained)\n"
        instructions += "• **Use the 3-stage memory system to give increasingly personalized responses**\n"

        return instructions

@@ -2297,6 +2501,102 @@ class PersonaGenerator:

        return descriptions

    def save_memory_to_file(self, filepath):
        """Save the memory data to a file"""
        try:
            memory_data = self.export_memory()
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(memory_data, f, ensure_ascii=False, indent=2)
            return True
        except Exception as e:
            print(f"Failed to save memory: {e}")
            return False

    def load_memory_from_file(self, filepath):
        """Load memory data from a file"""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                memory_data = json.load(f)
            self.import_memory(memory_data)
            return True
        except Exception as e:
            print(f"Failed to load memory: {e}")
            return False

    def get_memory_summary(self):
        """Return a summary of the memory system"""
        return self.conversation_memory.get_memory_summary()

    def save_memory(self, filepath):
        """Save the memory data"""
        return self.conversation_memory.export_to_json()

    def load_memory(self, json_data):
        """Load the memory data"""
        return self.conversation_memory.import_from_json(json_data)

    def clear_session_memory(self, session_id):
        """Clear the memory for a specific session"""
        if session_id in self.conversation_memory.user_profile:
            del self.conversation_memory.user_profile[session_id]

    def get_relationship_status(self, session_id="default"):
        """Check the current relationship state"""
        if session_id in self.conversation_memory.medium_term:
            return self.conversation_memory.medium_term[session_id]["relationship_level"]
        return "new_meeting"

    def get_context_for_response(self, personality_type, session_id="default"):
        """Provide context for response generation (PersonaGenerator compatibility)"""
        recent_context = self.get_relevant_context("", session_id, max_history=3)

        # Return in the existing memory_context format
        context = {
            "short_term_context": self._format_recent_conversations(recent_context["recent_conversations"]),
            "medium_term_insights": self._format_user_insights(recent_context["user_profile"]),
            "long_term_adaptations": self._format_keyword_insights(session_id)
        }

        return context

    def _format_recent_conversations(self, conversations):
        """Format the recent conversations"""
        if not conversations:
            return ""

        formatted = "## 💭 Recent conversation context:\n"
        for conv in conversations[-3:]:
            formatted += f"User: {conv['user_message']}\n"
            formatted += f"Me: {conv['ai_response'][:50]}...\n\n"

        return formatted

    def _format_user_insights(self, user_profile):
        """Format the user insights"""
        if not user_profile:
            return ""

        insights = f"## 🎯 Identified user traits:\n"
        insights += f"• Conversation count: {user_profile.get('message_count', 0)}\n"
        insights += f"• Relationship stage: {user_profile.get('relationship_level', 'unknown')}\n"
        insights += f"• Communication style: {user_profile.get('communication_style', 'average')}\n"
        insights += f"• Average message length: {user_profile.get('avg_message_length', 0):.0f} characters\n"

        return insights

    def _format_keyword_insights(self, session_id):
        """Format the keyword-based insights"""
        top_keywords = self.get_top_keywords(limit=5)

        if not top_keywords:
            return ""

        insights = "## 📊 Main interests and keywords:\n"
        for word, data in top_keywords:
            insights += f"• {word} ({data['category']}): {data['total_frequency']} mentions\n"

        return insights

def generate_personality_preview(persona_name, personality_traits):
    """Generate a one-sentence preview from the personality traits - sharply differentiated"""
    if not personality_traits:
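To see how the per-session profile evolves, here is a small usage sketch that replays a few turns through ConversationMemory as defined above and prints the resulting relationship tier and summary; the messages are invented.

    from modules.persona_generator import ConversationMemory

    memory = ConversationMemory()
    for i in range(12):
        memory.add_conversation(f"message {i} about games and coffee", "okay!", session_id="s1")

    profile = memory.user_profile["s1"]
    print(profile["message_count"], profile["relationship_level"])   # 12 messages -> "friendly" tier
    print(memory.get_conversation_summary(session_id="s1"))          # topics, sentiment trend, relationship stage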
requirements.txt
CHANGED
@@ -1,10 +1,9 @@

Old:
gradio
google-generativeai
…
numpy
matplotlib==3.7.2

New:
gradio>=4.0.0
google-generativeai
python-dotenv
pillow
openai
requests
pandas
plotly
numpy