""" TypeGPT
@author: NiansuhAI
@email: [email protected]
"""
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (such as API_KEY) from a local .env file
load_dotenv()
# Initialize the client against the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("API_KEY"),  # your Hugging Face access token
)
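# The Hugging Face Inference API exposes an OpenAI-compatible chat-completions
# endpoint under /v1, which is why the stock openai client works here with
# only a base_url change.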
# Map user-facing model names to Hugging Face model repositories
model_links = {
    "GPT-4o": "meta-llama/Meta-Llama-3-8B-Instruct",
    "GPT-4": "meta-llama/Meta-Llama-3.1-70B-Instruct",
}
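# Note that the dropdown labels are aliases only: requests are actually
# served by the Llama models on the right-hand side of this mapping.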
def reset_conversation():
    """Reset the conversation: clear the stored chat history."""
    st.session_state.conversation = []
    st.session_state.messages = []
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select a GPT model", models)
# Add a reset button to clear the conversation
st.sidebar.button('New Chat', on_click=reset_conversation)
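# Streamlit runs on_click callbacks before the script reruns, so the history
# is already cleared by the time the page is redrawn.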
# Create a temperature slider
temp_values = st.sidebar.slider('ChatGPT Temperature', 0.0, 1.0, 0.5)
st.sidebar.markdown("Temperature controls how varied the generated text is: lower values give more focused, deterministic replies, higher values more creative ones.")
st.sidebar.markdown("**For optimum results, we recommend selecting a temperature between 0.5 and 0.7.**")
# Accuracy disclaimer and site link
st.sidebar.markdown("*The content created may not be accurate.*")
st.sidebar.markdown("Our website: [Chat-GPT-Free.com](https://chat-gpt-free.com/).")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
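# Resetting on a model switch keeps one model's replies out of the prompt
# context that gets sent to the other model.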
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'[Chat-GPT-Free.com](https://chat-gpt-free.com/) with AI model {selected_model}')
# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
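# st.session_state persists across reruns, so the history accumulates for
# the lifetime of the browser session.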
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
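# Each rerun redraws the page from scratch, so stored messages are replayed
# above before any new input is handled.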
# Accept user input
if prompt := st.chat_input(f"Hi. I'm {selected_model}. How can I help you today?"):
    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        try:
            # Stream the completion; st.write_stream renders chunks as they
            # arrive and returns the full concatenated text at the end.
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            response = st.write_stream(stream)
        except Exception:
            response = "The model is overloaded! Please try again later :("
            st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})