File size: 2,494 Bytes
8963649
7ac814b
d8a7a69
 
 
dd6065b
d8a7a69
 
f8f5463
 
d8a7a69
 
 
 
8963649
7ac814b
 
 
 
 
8963649
7ac814b
21eb8fb
2ee2e8d
7ac814b
 
 
da504da
7ac814b
8963649
 
7ac814b
8963649
 
dd6065b
 
8963649
 
bc6380e
8963649
 
 
96b08f7
8963649
 
 
 
 
7ac814b
 
 
 
 
 
 
ecc99f8
7ac814b
 
8963649
 
281f939
96b08f7
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import streamlit as st
from openai import OpenAI
from typing import Iterator
import os
from phoenix.otel import register
from datetime import datetime

# Wire up Arize Phoenix tracing first, so the OpenAI instrumentor below can
# attach to this tracer provider.
tracer_provider = register(
    endpoint="https://app.phoenix.arize.com/v1/traces",
    project_name=st.secrets['PHOENIX_PROJECT_NAME'],
)

# Imported after register() on purpose: instrumentation binds to the provider
# created above.
from openinference.instrumentation.openai import OpenAIInstrumentor

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

# Basic page chrome for the Streamlit app.
st.set_page_config(
    page_title="Taiwan Smol Chat",
    page_icon="🦉",
    layout="centered"
)

st.title("🦉Taiwan Smol Chat")
st.info('Model based on [lianghsun/Llama-3.2-Taiwan-3B-Instruct](https://huggingface.co/lianghsun/Llama-3.2-Taiwan-3B-Instruct)', icon="🧠")
# Fixed typo in the user-facing banner: "Playgroud" -> "Playground".
st.warning('Playground 有可能因為 GPU 被挪用至生成資料使用導致暫時性無法對話。', icon="⚠️")

# OpenAI-compatible client pointed at the self-hosted endpoint from secrets.
client = OpenAI(
    api_key=st.secrets['API_KEY'],
    base_url=st.secrets['API_BASE_URL'],
)

# One-time per-session initialisation: remember which model to call and seed
# the conversation with a system message carrying today's date.
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = st.secrets['MODEL']

if "messages" not in st.session_state:
    today = datetime.now().strftime("%Y-%m-%d")
    system_message = {"role": "system", "content": f"現在的日期: {today}"}
    st.session_state.messages = [system_message]

# Re-render the visible chat history on every Streamlit rerun; the system
# prompt is internal and is never shown to the user.
for msg in st.session_state.messages:
    role = msg["role"]
    if role == "system":
        continue
    with st.chat_message(role):
        st.markdown(msg["content"])

# Handle one user turn: echo the prompt, stream the assistant reply, and
# persist both messages to the session history.
if prompt := st.chat_input("來聊點什麼吧"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # History entries are already plain {"role", "content"} dicts (they are
        # only ever constructed in this file), so the list can be sent to the
        # API as-is — the previous per-message dict copy was redundant.
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=True,
            temperature=0.2  # low temperature keeps replies focused
        )
        # write_stream renders tokens as they arrive and returns the full text.
        response = st.write_stream(stream)
    st.session_state.messages.append(
        {"role": "assistant", "content": response})

# with st.container():
#     st.caption('Please be aware that current Large Language Models (LLMs) can exhibit “hallucinations,” producing plausible-sounding but inaccurate or fabricated information. It is crucial to carefully review and verify any content generated by LLMs to avoid misunderstandings or misinformation. Always cross-check facts and consult reliable sources before making important decisions based on LLM outputs.')