practice_ai_app / app.py
mherlie's picture
changes
de34f97
raw
history blame
6.56 kB
import os
import streamlit as st
import openai
# -----------------------------------------------------------------------------
# Configuration and API Key Setup
# -----------------------------------------------------------------------------
# SECURITY: never hard-code an API key in source code. A key committed to a
# repository is public and must be treated as compromised (revoke and rotate
# it immediately). Supply the key via the OPENAI_API_KEY environment variable,
# or via Streamlit's secrets management (secrets.toml on Streamlit Cloud).
openai.api_key = os.getenv("OPENAI_API_KEY", "")
if not openai.api_key:
    # Fail visibly rather than silently making doomed API calls later.
    st.warning("OPENAI_API_KEY is not set. Configure it as an environment "
               "variable or via Streamlit secrets before generating text.")
# -----------------------------------------------------------------------------
# Helper Functions
# -----------------------------------------------------------------------------
def generate_prompt(topic, output_format, tone, length, creativity, creative_mode, fact_checking):
    """
    Assemble the text-generation prompt from the user's UI selections.

    Parameters mirror the input widgets: the free-form topic text, the
    desired output format and tone, the target word count, a 1-10
    creativity score, and the two feature checkboxes. Returns the
    finished prompt as a single newline-terminated string.
    """
    # Mandatory lead-in: the topic (followed by a blank line) and the
    # format/tone instruction.
    parts = [
        f"Topic: {topic}\n",
        f"Please generate a {output_format} with a {tone} tone.",
    ]
    # Optional instructions driven by the checkbox toggles.
    if creative_mode:
        parts.append("Use creative language and storytelling techniques.")
    if fact_checking:
        parts.append("Ensure the facts mentioned are accurate.")
    # Length and creativity directives always close out the prompt.
    parts.append(f"The text should be approximately {length} words long.")
    parts.append(f"Please adjust the creativity level to {creativity} (scale 1-10).")
    return "\n".join(parts) + "\n"
def call_openai_api(prompt, num_responses):
    """
    Request `num_responses` completions for `prompt` from the OpenAI API.

    Returns a list of generated strings (one per completion), or None if
    the request failed; failures are surfaced to the user via st.error.

    NOTE(review): this uses the legacy ``openai.Completion`` endpoint and
    the retired ``text-davinci-003`` model. Current SDK versions (>= 1.0)
    require migrating to the Chat Completions API -- confirm the pinned
    ``openai`` package version before deploying.
    """
    try:
        # Ask for all completions in a single request via the API's ``n``
        # parameter instead of looping -- one network round-trip instead
        # of num_responses separate calls.
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            max_tokens=300,    # cap on generated tokens per completion
            temperature=0.7,   # sampling temperature (creativity)
            top_p=1,
            n=num_responses,
            stop=None,
        )
        return [choice.text.strip() for choice in response.choices]
    except Exception as e:
        # Boundary handler: report the failure in the UI and signal the
        # caller with None instead of crashing the Streamlit script.
        st.error(f"An error occurred while generating text: {e}")
        return None
# -----------------------------------------------------------------------------
# Streamlit User Interface
# -----------------------------------------------------------------------------
# Page header.
st.title("AI-Powered Text Generation App")
st.write("Generate text using AI based on your inputs.")
# --- Input Fields ---
# Every value collected below is fed into generate_prompt() /
# call_openai_api() when the "Generate Text" button is pressed.
# Text area for providing the core topic or initial text.
topic = st.text_area("Enter the topic or initial text", placeholder="Type your topic here...", height=150)
# Dropdown for selecting the desired output format (also controls how the
# result is rendered: "Code" is shown with syntax highlighting).
output_format = st.selectbox(
    "Select the output format",
    options=["Story", "Poem", "Article", "Code"],
    index=0
)
# Dropdown for selecting the desired tone or style.
tone = st.selectbox(
    "Select the tone or style",
    options=["Formal", "Informal", "Humorous", "Technical"],
    index=0
)
# Slider for controlling the approximate length of the generated text.
length = st.slider("Select the approximate word count", min_value=50, max_value=1000, value=200, step=50)
# Slider for controlling the creativity level (passed to the prompt as a
# 1-10 instruction; it does not change the API temperature parameter).
creativity = st.slider("Set the creativity level (1: less creative, 10: more creative)", min_value=1, max_value=10, value=7)
# Numeric input for specifying the number of responses to generate.
num_responses = st.number_input("Number of responses to generate", min_value=1, max_value=5, value=1, step=1)
# Checkboxes for enabling or disabling specific prompt instructions.
creative_mode = st.checkbox("Enable creative mode", value=True)
fact_checking = st.checkbox("Enable fact-checking", value=False)
# Button to trigger text generation.
# Button to trigger text generation.
#
# The generated responses are stored in st.session_state rather than shown
# directly inside this branch: clicking ANY other widget (e.g. the feedback
# buttons below) reruns the whole script with st.button("Generate Text")
# evaluating to False, which would otherwise make the results -- and the
# feedback acknowledgment -- disappear immediately.
if st.button("Generate Text"):
    if not topic.strip():
        st.warning("Please enter a topic or some initial text before generating content.")
    else:
        with st.spinner("Generating text..."):
            # Construct the prompt from the user inputs.
            prompt = generate_prompt(topic, output_format, tone, length, creativity, creative_mode, fact_checking)
            st.session_state["prompt"] = prompt
            # Call the OpenAI API to generate text responses (None on failure).
            st.session_state["responses"] = call_openai_api(prompt, num_responses)
            if st.session_state["responses"]:
                st.success("Text generated successfully!")

# Render the most recent results (if any) on every rerun so the feedback
# widgets keep working after the Generate click.
if st.session_state.get("responses"):
    st.write("**Constructed Prompt:**")
    st.code(st.session_state["prompt"], language="text")
    # Display each response in a well-formatted manner.
    for idx, response in enumerate(st.session_state["responses"], start=1):
        st.markdown(f"### Response {idx}")
        # Use different formatting based on output format.
        if output_format.lower() == "code":
            st.code(response, language="python")
        else:
            st.write(response)
        # Feedback section (placeholder for like/dislike buttons and comments)
        col1, col2 = st.columns(2)
        with col1:
            if st.button(f"👍 Like (Response {idx})"):
                st.info("Thank you for your feedback!")
        with col2:
            if st.button(f"👎 Dislike (Response {idx})"):
                st.info("Thank you for your feedback!")
        # Optionally, you can add a text input for comments.
        feedback = st.text_input(f"Leave a comment for Response {idx} (optional)", key=f"feedback_{idx}")
        if feedback:
            st.write("Your comment:", feedback)
# -----------------------------------------------------------------------------
# Additional Considerations
# -----------------------------------------------------------------------------
# Closing disclaimer rendered at the bottom of the page on every run.
st.markdown("---")
st.info("This is a prototype application. In a production environment, ensure to implement proper security, error logging, and user authentication as needed.")