""" TypeGPT
@author: NiansuhAI
@email: [email protected]
"""
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (including API_KEY) from a local .env file
load_dotenv()
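# This script assumes the .env file defines the Hugging Face token under the
# name API_KEY, e.g.:
#   API_KEY=hf_...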
# Initialize the client
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("API_KEY"),  # Hugging Face access token
)
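# The base_url points at Hugging Face's OpenAI-compatible Inference API
# endpoint, which is why the standard openai client works here unchanged.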
# Map the user-facing model names to the underlying Hugging Face model repos
model_links = {
    "GPT-4o": "mistralai/Mistral-Nemo-Instruct-2407",
    "GPT-4": "meta-llama/Meta-Llama-3-8B-Instruct",
    "GPT-3.5": "meta-llama/Meta-Llama-3.1-70B-Instruct",
}
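# To expose another model, add a display-name -> repo-id pair above; the repo
# must be served by the Inference API. A hypothetical example entry:
#   "GPT-4-mini": "mistralai/Mistral-7B-Instruct-v0.3",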
def reset_conversation():
    """Clear the chat history and conversation state."""
    st.session_state.conversation = []
    st.session_state.messages = []
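# reset_conversation is wired up as a button callback below; Streamlit runs
# on_click callbacks before the rerun, so the next render sees cleared state.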
# Define the available models
models = list(model_links.keys())
# Create the sidebar dropdown for model selection
selected_model = st.sidebar.selectbox("Select a GPT model", models)

# Add a reset button to clear the conversation
st.sidebar.button("New chat", on_click=reset_conversation)
# Create a temperature slider
temp_values = st.sidebar.slider("ChatGPT temperature", 0.0, 1.0, 0.5)
st.sidebar.markdown("Temperature affects the quality and coherence of the text ChatGPT generates.")
st.sidebar.markdown("**For best results we recommend a temperature between 0.5 and 0.7.**")

# Model description
st.sidebar.markdown("*Generated content may be inaccurate.*")
st.sidebar.markdown("\n Our site: [GPT-ChatBot.ru](https://gpt-chatbot.ru/).")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f"[GPT-ChatBot.ru](https://gpt-chatbot.ru/) with the {selected_model} model")
# Set a default model for this session
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
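# Note: Streamlit reruns this entire script on every user interaction, so the
# loop above replays the saved history to rebuild the transcript each time.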
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):

    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            # Render the response incrementally as chunks arrive
            response = st.write_stream(stream)
        except Exception:
            response = (
                "The chat seems to be overloaded!"
                "\nPlease try your request again later :("
            )
            st.write(response)

    st.session_state.messages.append({"role": "assistant", "content": response})
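# st.write_stream both renders the streamed chunks and returns the final
# concatenated text, so the assistant turn saved above matches what was shown.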