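"""Streamlit chat app backed by an OpenAI-compatible API.

Chat history and an optional system prompt are kept in Streamlit session
state, responses are streamed token by token, and all OpenAI calls are
traced with Arize Phoenix via OpenInference instrumentation.
"""
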
import streamlit as st
from openai import OpenAI
from typing import Iterator
import os
from phoenix.otel import register

# Send traces to a Phoenix collector; the project name and endpoint are
# read from the environment so the app stays deployment-agnostic.
tracer_provider = register(
    project_name=os.getenv('PHOENIX_PROJECT_NAME'),
    endpoint=os.getenv('PHOENIX_COLLECTOR_ENDPOINT'),
)

# Auto-instrument the OpenAI client so every chat completion call is traced
from openinference.instrumentation.openai import OpenAIInstrumentor

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

# Configure page settings
st.set_page_config(
    page_title="LLM Taiwan Chat",
    page_icon="💬",
    layout="centered"
)

# Initialize session state for chat history and system prompt
if "messages" not in st.session_state:
    st.session_state.messages = []
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = ""
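
# Streamlit reruns this script top-to-bottom on every interaction, so
# anything that must survive across turns lives in session_state.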

def stream_chat() -> Iterator[str]:
    """Stream chat responses from the LLM API.

    The newest user message is already in st.session_state.messages
    (main() appends it before calling this), so no prompt argument is
    needed here.
    """
    client = OpenAI(
        api_key=os.getenv('API_KEY'),
        base_url=os.getenv('API_BASE_URL')
    )

    messages = []
    if st.session_state.system_prompt:
        messages.append({"role": "system", "content": st.session_state.system_prompt})
    messages.extend(st.session_state.messages)

    stream = client.chat.completions.create(
        messages=messages,
        model=os.getenv('LLM_MODEL_NAME'),
        stream=True
    )
    
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content

def clear_chat_history():
    """Clear all chat messages and reset system prompt"""
    st.session_state.messages = []
    st.session_state.system_prompt = ""

def main():
    st.title("💬 LLM Taiwan Chat")
    
    # Add a clear chat button with custom styling
    col1, col2 = st.columns([6, 1])
    with col2:
        if st.button("🗑️", type="secondary", use_container_width=True):
            clear_chat_history()
            st.rerun()
    
    # System prompt input
    system_prompt = st.text_area(
        "System prompt:",
        value=st.session_state.system_prompt,
        help="Set a system prompt to define the assistant's behavior and role. It cannot be changed once the conversation starts.",
        height=100,
        disabled=len(st.session_state.messages) > 0  # read-only once the conversation has started
    )
    if not st.session_state.messages and system_prompt != st.session_state.system_prompt:
        st.session_state.system_prompt = system_prompt
    
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])
    
    # Chat input
    if prompt := st.chat_input("Type your message..."):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        
        # Display user message
        with st.chat_message("user"):
            st.write(prompt)
        
        # Display assistant response with streaming
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""
            
            # Stream the response
            for response_chunk in stream_chat():
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
            response_placeholder.markdown(full_response)
        
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})

if __name__ == "__main__":
    main()
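
# To run locally (environment variables as referenced above; the filename
# app.py is an assumption -- use whatever this file is saved as):
#   export API_KEY=...                    # key for the OpenAI-compatible API
#   export API_BASE_URL=...               # base URL of the OpenAI-compatible API
#   export LLM_MODEL_NAME=...             # model name to request
#   export PHOENIX_PROJECT_NAME=...       # Phoenix project for traces
#   export PHOENIX_COLLECTOR_ENDPOINT=... # Phoenix collector URL
#   streamlit run app.py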