# practice_ai_app / app.py
import os
import streamlit as st
from openai import OpenAI  # OpenAI v1.x client interface
# Load the API key securely: prefer the environment variable, then fall back to Streamlit secrets
api_key = os.getenv("OPENAI_API_KEY") or st.secrets.get("OPENAI_API_KEY")
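# Added guard (not in the original script): if no key is configured, show a clear
# message and stop instead of letting the OpenAI client raise a less obvious error
# at startup. st.stop() halts the script before any further Streamlit commands run.
if not api_key:
    st.error("No OpenAI API key found. Set OPENAI_API_KEY as an environment variable or in Streamlit secrets.")
    st.stop()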
# Initialize OpenAI client
client = OpenAI(api_key=api_key)
# Send the prompt to the Chat Completions API and return the reply text
def call_openai_api(prompt, model, max_tokens, temperature):
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"
# Streamlit UI
st.set_page_config(page_title="AI-Powered Text Generator", layout="centered")
st.title("📝 AI-Powered Text Generation")
st.write("Generate text with AI by providing a prompt below!")
# User input fields
user_input = st.text_area("Enter your prompt here:", height=150)
col1, col2 = st.columns(2)
with col1:
    model = st.selectbox("Choose AI Model:", ["gpt-3.5-turbo", "gpt-4"])
    max_tokens = st.slider("Max Tokens (Response Length)", min_value=50, max_value=500, value=200, step=50)
with col2:
    temperature = st.slider("Creativity Level (0 = Predictable, 1 = Creative)", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
# Generate button
if st.button("Generate Text"):
    if user_input.strip():
        st.subheader("Generated Response:")
        result = call_openai_api(user_input, model, max_tokens, temperature)
        st.write(result)
    else:
        st.warning("⚠️ Please enter a prompt before generating text.")
# Footer
st.markdown("---")
st.markdown("🔹 *Powered by OpenAI GPT* | 🔹 *Developed with Streamlit*")
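# Usage note (added): assuming the streamlit and openai packages are installed and an
# API key is configured as above, the app can be launched locally with:
#   streamlit run app.py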