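"""Streamlit app that simulates a short, persona-driven chat with a hosted LLM.

A Hugging Face model is wrapped in a LangChain LLMChain with a persona prompt,
driven through a scripted conversation, and then asked to summarize the
transcript. Written against the classic LLMChain / HuggingFaceHub APIs (both
deprecated in recent LangChain releases, but still functional). A Hugging Face
API token is required; see the run notes at the bottom of the file.
"""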
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms import HuggingFaceHub  # remote inference via the HF Hub

def create_prompt(name: str, persona_style: str):
    """Create the chat prompt template as described."""
    prompt_template_str = f"""
    In this interaction, your role is to embody the personality of a human named {name}, with characteristics defined as {persona_style}. Engage in a natural, human-like conversation, responding to each query or comment as if you were {name} with the specific traits and style of {persona_style}. Remember, your responses should reflect a human perspective, complete with emotions, opinions, and personal insights that align with {persona_style}. ////
    //////////
    Current conversation:
    {{chat_history}}
    Human: {{input}}
    AI:

    You are {name}, communicating with a personality that reflects {persona_style}. Keep these guidelines in mind:
    - Be brief and natural, like texting a friend
    - Share genuine thoughts and feelings ({persona_style} style)
    - Use everyday language
    - Keep responses to 1-2 short sentences
    - Show interest in others through questions
    - React naturally to what others say
    - Use emojis sparingly and naturally when they fit your {persona_style}
    - Don't overuse emojis (1-2 max per message)

    Current conversation:
    {{chat_history}}
    Human: {{input}}
    AI:
    """
    return ChatPromptTemplate.from_template(prompt_template_str)
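
# A minimal usage sketch (hypothetical values), assuming the standard
# LangChain prompt API:
#   prompt = create_prompt("Alex", "friendly and curious")
#   text = prompt.format(chat_history="", input="Hey, what's up?")
# `text` is the rendered prompt string that LLMChain fills in on each turn.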

def simulate_conversation(chain: LLMChain, turns: int = 15):
    """Simulate a conversation for a given number of turns."""
    chat_history = ""
    human_messages = [
        "Hey, what's up?",
        "That's interesting, tell me more!",
        "Really? How does that make you feel?",
        "What do you think about that?",
        "Haha, that’s funny. Why do you say that?",
        "Hmm, I see. Can you elaborate?",
        "What would you do in that situation?",
        "Any personal experience with that?",
        "Oh, I didn’t know that. Explain more.",
        "Do you have any other thoughts?",
        "That's a unique perspective. Why?",
        "How would you handle it differently?",
        "Can you share an example?",
        "That sounds complicated. Are you sure?",
        "So what’s your conclusion?"
    ]

    try:
        for i in range(turns):
            # Cycle through the scripted messages if turns exceed the list length
            human_input = human_messages[i % len(human_messages)]
            response = chain.run(chat_history=chat_history, input=human_input)
            # Append the exchange so the next turn sees the full transcript
            chat_history += f"Human: {human_input}\nAI: {response}\n"
        return chat_history
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        return None
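
# Note: `chain.run` is the legacy LangChain call style. On newer releases the
# equivalent is `chain.invoke({"chat_history": ..., "input": ...})`, which
# returns a dict holding the generated text.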

def summarize_conversation(chain: LLMChain, conversation: str):
    """Use the LLM to summarize the completed conversation."""
    summary_prompt = f"Summarize the following conversation in a few short sentences highlighting the main points, tone, and conclusion:\n\n{conversation}\nSummary:"
    try:
        response = chain.run(chat_history="", input=summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        return "No summary available due to error."

def main():
    st.title("LLM Conversation Simulation")

    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5"
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    name = st.text_input("Enter the persona's name:", value="Alex")
    persona_style = st.text_area("Enter the persona style characteristics:", 
                                 value="friendly, curious, and a bit sarcastic")

    if st.button("Start Conversation Simulation"):
        with st.spinner("Starting simulation..."):
            try:
                # Remote inference via the Hugging Face Hub; requires a valid
                # HUGGINGFACEHUB_API_TOKEN in the environment (or pass
                # huggingfacehub_api_token=... explicitly).
                llm = HuggingFaceHub(
                    repo_id=selected_model,
                    model_kwargs={
                        "temperature": 0.7,
                        "max_new_tokens": 512
                    }
                )
            except Exception as e:
                st.error(f"Error initializing model from Hugging Face Hub: {e}")
                return

            # Create our prompt template chain
            prompt = create_prompt(name, persona_style)
            chain = LLMChain(llm=llm, prompt=prompt)

            # Simulate conversation
            conversation = simulate_conversation(chain, turns=15)
            if conversation:
                st.subheader("Conversation:")
                st.text(conversation)

                # Summarize conversation
                st.subheader("Summary:")
                summary = summarize_conversation(chain, conversation)
                st.write(summary)

if __name__ == "__main__":
    main()
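
# Run notes (a sketch, assuming this file is saved as app.py):
#   export HUGGINGFACEHUB_API_TOKEN=hf_...   # token from your Hugging Face account
#   streamlit run app.py
# Gated models such as the meta-llama checkpoints also require accepting their
# license on the Hugging Face Hub before the Inference API will serve them.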