import streamlit as st
from PIL import Image
import yaml
import os
import urllib.request
import tarfile
from yaml.loader import SafeLoader

st.set_page_config(
    #page_title="Semantic Search using OpenSearch",
    layout="wide",
    page_icon="/home/ubuntu/images/opensearch_mark_default.png"
)

# Global CSS injection; the original style rules were omitted from this copy.
st.markdown("""
""", unsafe_allow_html=True)

# Optional login flow (Authenticate comes from the streamlit_authenticator package):
# with open('/home/ubuntu/AI-search-with-amazon-opensearch-service/OpenSearchApp/auth.yaml') as file:
#     config = yaml.load(file, Loader=SafeLoader)
# authenticator = Authenticate(
#     config['credentials'],
#     config['cookie']['name'],
#     config['cookie']['key'],
#     config['cookie']['expiry_days'],
#     config['preauthorized']
# )
# name, authentication_status, username = authenticator.login('Login', 'main')

AI_ICON = "images/opensearch-twitter-card.png"

# Page header: logo next to the title.
col_0_1, col_0_2, col_0_3 = st.columns([10, 50, 85])
with col_0_1:
    st.image(AI_ICON, use_container_width=True)
with col_0_2:
    # The original inline-styled HTML for this title was stripped from this
    # copy; the plain <h1> markup below is a stand-in.
    st.markdown('<h1>OpenSearch AI demos</h1>', unsafe_allow_html=True)
#st.header("OpenSearch AI demos")#, divider = 'rainbow'
# with col_0_3:
#     st.markdown("Workshop", unsafe_allow_html=True)

#st.header(":rewind: Demos available")
st.write("")
#st.write("----")
#st.write("Choose a demo")
st.write("")

# Demo 1: Neural (semantic) search.
col_1_1, col_1_2, col_1_3 = st.columns([3, 40, 65])
with col_1_1:
    st.subheader(" ")
with col_1_2:
    # Stand-in markup; the original styled HTML was stripped from this copy.
    st.markdown('<h2>Neural Search</h2>', unsafe_allow_html=True)
with col_1_3:
    demo_1 = st.button(":arrow_forward:", key="demo_1")
    if demo_1:
        st.switch_page('pages/Semantic_Search.py')
st.write("")
#st.page_link("pages/1_Semantic_Search.py", label=":orange[1. Semantic Search] :arrow_forward:")
#st.button("1. Semantic Search")
# image_ = Image.open('/home/ubuntu/images/Semantic_SEarch.png')
# new_image = image_.resize((1500, 1000))
# new_image.save('images/semantic_search_resize.png')
# st.image("images/semantic_search_resize.png")

# Demo 2: Multimodal conversational search.
col_2_1, col_2_2, col_2_3 = st.columns([3, 40, 65])
with col_2_1:
    st.subheader(" ")
with col_2_2:
    # Stand-in markup; the original styled HTML was stripped from this copy.
    st.markdown('<h2>Multimodal Conversational Search</h2>', unsafe_allow_html=True)
with col_2_3:
    demo_2 = st.button(":arrow_forward:", key="demo_2")
    if demo_2:
        st.switch_page('pages/Multimodal_Conversational_Search.py')
st.write("")
#st.header("2. Multimodal Conversational Search")
# image_ = Image.open('images/RAG_.png')
# new_image = image_.resize((1500, 1000))
# new_image.save('images/RAG_resize.png')
# st.image("images/RAG_resize.png")

# Demo 3: Agentic shopping assistant.
col_3_1, col_3_2, col_3_3 = st.columns([3, 40, 65])
with col_3_1:
    st.subheader(" ")
with col_3_2:
    # Stand-in markup; the original styled HTML was stripped from this copy.
    st.markdown('<h2>Agentic Shopping Assistant</h2>', unsafe_allow_html=True)  # New
with col_3_3:
    demo_3 = st.button(":arrow_forward:", key="demo_3")
    if demo_3:
        st.switch_page('pages/AI_Shopping_Assistant.py')

# with st.sidebar:
#     st.subheader("Choose a demo !")

# One-time setup: fetch the sample product catalogue and images used by the
# demos. The downloads run only when the image directory does not exist yet,
# so page reruns skip the network round trips.
isExist = os.path.exists("/home/user/images_retail")
if not isExist:
    os.makedirs("/home/user/images_retail")
    metadata_file = urllib.request.urlretrieve(
        'https://aws-blogs-artifacts-public.s3.amazonaws.com/BDB-3144/products-data.yml',
        '/home/user/products.yaml')
    img_filename, headers = urllib.request.urlretrieve(
        'https://aws-blogs-artifacts-public.s3.amazonaws.com/BDB-3144/images.tar.gz',
        '/home/user/images_retail/images.tar.gz')
    print(img_filename)
    # Unpack the images, then remove images.tar.gz.
    with tarfile.open('/home/user/images_retail/images.tar.gz') as file:
        file.extractall('/home/user/images_retail/')
    os.remove('/home/user/images_retail/images.tar.gz')
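
# A minimal sketch (not wired into the page) of how the download block above
# could be packaged instead: st.cache_resource is a real Streamlit API, but the
# helper name, its default path, and the idea of caching the dataset location
# are assumptions for illustration, not part of the original app.
@st.cache_resource
def fetch_retail_dataset(target_dir: str = "/home/user/images_retail") -> str:
    """Download and unpack the demo images once per server process."""
    archive_path = os.path.join(target_dir, "images.tar.gz")
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
        urllib.request.urlretrieve(
            'https://aws-blogs-artifacts-public.s3.amazonaws.com/BDB-3144/images.tar.gz',
            archive_path)
        with tarfile.open(archive_path) as archive:
            archive.extractall(target_dir)
        os.remove(archive_path)
    return target_dir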