import streamlit as st
from openai import OpenAI
from typing import Iterator
import os

from phoenix.otel import register

tracer_provider = register(
    project_name=os.getenv('PHOENIX_PROJECT_NAME'),
    endpoint=os.getenv('PHOENIX_COLLECTOR_ENDPOINT'),
)

from openinference.instrumentation.openai import OpenAIInstrumentor

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)
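
# Expected environment variables (e.g. Hugging Face Space secrets or a local
# shell export; the exact provisioning mechanism is deployment-specific):
#   PHOENIX_PROJECT_NAME       - Phoenix project to log traces under
#   PHOENIX_COLLECTOR_ENDPOINT - where the Phoenix trace collector listens
#   API_KEY / API_BASE_URL     - credentials and base URL for the
#                                OpenAI-compatible chat endpoint
#   LLM_MODEL_NAME             - model identifier passed to the API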
# Configure page settings
st.set_page_config(
    page_title="LLM Taiwan Chat",
    page_icon="💬",
    layout="centered"
)
# Display privacy notice
st.markdown("""
> **Privacy Notice**
>
> By using this chat service, you agree that:
> - Your conversation content may be used to improve service quality
> - Chat logs may be used as material for system training and evaluation
> - Do not disclose any personal or private information in your conversations
""")

st.markdown("---")
# Initialize session state for chat history and generation settings
if "messages" not in st.session_state:
    st.session_state.messages = []
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = ""
if "temperature" not in st.session_state:
    st.session_state.temperature = 0.2
if "top_p" not in st.session_state:
    st.session_state.top_p = 0.95
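
# st.session_state persists across Streamlit's script reruns within a browser
# session, so the history and settings above survive each interaction.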
def stream_chat(prompt: str) -> Iterator[str]:
    """Stream chat responses from the LLM API.

    Note: the user's latest message is read from st.session_state.messages
    (appended by main() before this call), so `prompt` itself is unused here.
    """
    client = OpenAI(
        api_key=os.getenv('API_KEY'),
        base_url=os.getenv('API_BASE_URL')
    )

    # Prepend the optional system prompt, then replay the full history
    messages = []
    if st.session_state.system_prompt:
        messages.append({"role": "system", "content": st.session_state.system_prompt})
    messages.extend(st.session_state.messages)

    stream = client.chat.completions.create(
        messages=messages,
        model=os.getenv('LLM_MODEL_NAME'),
        stream=True,
        temperature=st.session_state.temperature,
        top_p=st.session_state.top_p
    )
    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            yield chunk.choices[0].delta.content
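
# Because OpenAIInstrumentor is active, every completion call made inside
# stream_chat() is exported as a trace to the Phoenix collector.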
def clear_chat_history():
    """Clear all chat messages and reset the system prompt"""
    st.session_state.messages = []
    st.session_state.system_prompt = ""
def main():
    st.title("💬 LLM Taiwan Chat")

    # Add a clear-chat button, right-aligned via an uneven column split
    col1, col2 = st.columns([6, 1])
    with col2:
        if st.button("🗑️", type="secondary", use_container_width=True):
            clear_chat_history()
            st.rerun()
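
    # st.rerun() restarts the script run immediately, so the cleared history
    # (and the re-enabled system-prompt field) render without further input.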
    # Advanced options in expander
    with st.expander("Advanced options ⚙️", expanded=False):
        # System prompt input
        system_prompt = st.text_area(
            "System prompt:",
            value=st.session_state.system_prompt,
            help="Set a system prompt to define the AI assistant's behavior and role. It cannot be changed once the conversation has started.",
            height=100,
            disabled=len(st.session_state.messages) > 0  # read-only once a conversation exists
        )
        if not st.session_state.messages and system_prompt != st.session_state.system_prompt:
            st.session_state.system_prompt = system_prompt

        st.session_state.temperature = st.slider(
            "Temperature",
            min_value=0.0,
            max_value=2.0,
            value=st.session_state.temperature,
            step=0.1,
            help="Higher values make the output more random; lower values make it more focused and deterministic."
        )
        st.session_state.top_p = st.slider(
            "Top P",
            min_value=0.1,
            max_value=1.0,
            value=st.session_state.top_p,
            step=0.05,
            help="Controls the diversity of the model's output; lower values make it more conservative."
        )
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])
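
    # Streamlit reruns this script on every interaction, so the loop above
    # replays the full history from session state before new input is handled.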
    # Chat input
    if prompt := st.chat_input("Type your message..."):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.write(prompt)

        # Display assistant response with streaming
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""

            # Stream the response, showing a cursor glyph while tokens arrive
            for response_chunk in stream_chat(prompt):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
            response_placeholder.markdown(full_response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})
if __name__ == "__main__":
    main()
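
# Typically launched with `streamlit run app.py` (filename assumed), after
# setting the environment variables documented at the top of this file.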