# Hugging Face Spaces page residue ("Spaces: Sleeping") removed — not part of the app code.
"""Streamlit app: AWS Well-Architected Review recommendations.

Sends the user's architecture description to an NVIDIA-hosted LLM via the
OpenAI-compatible API at integrate.api.nvidia.com and streams back
recommendations. Conversation history is kept in Streamlit session state.
"""
import streamlit as st
import os
from openai import OpenAI

# NVIDIA's NIM endpoint speaks the OpenAI API; the key is read from
# Streamlit secrets so it never appears in the source or the repo.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",  # Nvidia API endpoint
    api_key=st.secrets["NVIDIA_API_KEY"],  # Nvidia API Key from Streamlit secrets
)

# Define Streamlit app layout
st.title("AWS Well-Architected Review")
st.write("Get recommendations for optimizing your AWS architecture.")

# Conversation history must live in session state: Streamlit re-runs this
# whole script on every interaction, so module-level lists would be reset.
if "messages" not in st.session_state:
    st.session_state.messages = []

# User input for AWS architecture description
architecture_input = st.text_area("Describe your AWS architecture:")

if st.button("Get Recommendations"):
    if not architecture_input:
        # Original silently did nothing on empty input; tell the user instead.
        st.warning("Please describe your AWS architecture first.")
    else:
        # Record the user turn so follow-up requests carry full context.
        st.session_state.messages.append(
            {"role": "user", "content": architecture_input}
        )
        with st.chat_message("assistant"):
            with st.spinner("Generating recommendations..."):
                # Model ids on integrate.api.nvidia.com are namespaced
                # ("vendor/model"); the original "nvidia-llama-3.1-70b-instruct"
                # is rejected by the API with a model-not-found error.
                stream = client.chat.completions.create(
                    model="meta/llama-3.1-70b-instruct",
                    messages=st.session_state.messages,  # full history each call
                    temperature=0.5,
                    top_p=0.7,
                    max_tokens=1024,
                    stream=True,
                )
                # Streamed deltas may contain None for non-content chunks
                # (e.g. the final stop chunk) — skip those.
                response = "".join(
                    chunk.choices[0].delta.content
                    for chunk in stream
                    if chunk.choices[0].delta.content is not None
                )
            # Display the response as recommendations
            st.markdown(f"**Recommendations:**\n\n{response}")
        # Record the assistant turn so the next request sees it.
        st.session_state.messages.append(
            {"role": "assistant", "content": response}
        )