|
import os |
|
from dotenv import load_dotenv |
|
import streamlit as st |
|
import openai |
|
|
|
|
|
|
|
def chat_with_gpt(prompt, max_tokens=None, temperature=None):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Parameters
    ----------
    prompt : str
        The user message to send as a single-turn conversation.
    max_tokens : int | None, optional
        Cap on generated tokens. Omitted from the request when ``None``
        so the API default applies (preserves the original behavior).
    temperature : float | None, optional
        Sampling temperature. Omitted from the request when ``None``.

    Returns
    -------
    str
        The assistant's reply content.
    """
    # Re-reading .env on every call is redundant but harmless; it keeps
    # the function self-contained for script-style use.
    load_dotenv()
    openai.api_key = os.getenv("OPENAI_API_KEY")

    # Only forward the optional sampling knobs when callers supply them,
    # so existing call sites produce the exact original request payload.
    extra = {}
    if max_tokens is not None:
        extra["max_tokens"] = max_tokens
    if temperature is not None:
        extra["temperature"] = temperature

    result = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "user",
                "content": prompt
            }
        ],
        **extra,
    )
    return result.choices[0].message.content
|
|
|
|
|
|
|
def main():
    """Render the Streamlit page and display the model's answer on demand."""
    st.title("Generation and Analysis Tool with OpenAI")

    prompt = st.text_input("Please enter a question or request:")

    # NOTE(review): these sidebar values are collected but never passed to
    # chat_with_gpt — confirm whether they should be wired into the request.
    max_tokens = st.sidebar.slider("Max Tokens", min_value=10, max_value=200, value=50, step=10,
                                   help="Maximum number of tokens to generate")
    temperature = st.sidebar.slider("Temperature", min_value=0.1, max_value=1.0, value=0.5, step=0.1,
                                    help="Controls the randomness of the generated text. Higher values make the text "
                                         "more random.")

    # Guard clauses: do nothing until the button is pressed, and reject
    # whitespace-only prompts before calling the API.
    if not st.button("Enter"):
        return
    if not prompt.strip():
        st.warning("Please enter a valid prompt.")
        return

    answer = chat_with_gpt(prompt)
    st.success("Here's the response:")
    st.write(answer)


if __name__ == "__main__":
    main()
|
|