import gradio as gr

from blindbox.requests import SecureSession

# Public address of the demo inference server running inside a confidential VM.
DEMO_SERVER = "4.208.9.167:80"


def run_query(server, prompt):
    """Send *prompt* to the code-generation model inside the BlindBox enclave.

    Args:
        server: Label of the radio choice selected in the UI. Only the
            value "Non-confidential VM server" is rejected; any other value
            proceeds against DEMO_SERVER.
        prompt: Source-code snippet to complete, or None when the code box
            is empty.

    Returns:
        The cleaned model completion followed by a privacy notice, or an
        error message string if the query is refused or fails.
    """
    if server == "Non-confidential VM server":
        return ("ā Error: you can only connect to an application running on a Confidential VM")
    # Attestation policy verified by SecureSession before any data is sent.
    POLICY = "./cce_policy.txt"
    # Bug fix: was `prompt == None`; identity comparison is the correct idiom.
    if prompt is None:
        return ("ā Error: please provide input code")
    try:
        with SecureSession(f"http://{DEMO_SERVER}", POLICY) as secure_session:
            res = secure_session.post(endpoint="/generate", json={"input_text": prompt})
            # Crude extraction of the generated text from the raw response body:
            # unescape "\n", keep only the first paragraph, then drop the JSON
            # prefix up to ':"'. NOTE(review): brittle — breaks if the response
            # schema changes; consider res.json() if the server returns JSON.
            cleaned = res.text.replace('\\n', '\n').split('\n\n')[0].split(':"')[1]
            cleaned = cleaned.replace('\\', '')
            return (cleaned + "\n\nā Input is end-to-end protected\nUser data is protected by a highly isolated and secure environment during runtime, meaning we, as the service providers, cannot access your input!")
    except Exception as err:
        # Best-effort demo UI: surface any failure as a message, never crash.
        return (f"ā Query failed!\n{err}")


with gr.Blocks(css=".gradio-container {background-color: #20233fff}") as demo:
    # NOTE(review): the original Markdown strings contained markup that was
    # stripped in the source we recovered; the visible text is preserved here
    # as plain strings — confirm against the deployed demo.
    gr.Markdown("This is our demo for our article on deploying code generation LLM models with BlindBox. The user input is end-to-end protected.")
    gr.Markdown("You can see how we deployed the model in the integration section of our documentation!")
    gr.Markdown("ā ļø BlindBox is still under development. Do not test with production data!")
    # Side gutters for centering; only the middle column holds widgets.
    _, colum_2, _ = gr.Column(scale=1), gr.Column(scale=6), gr.Column(scale=1)
    with colum_2:
        prompt = gr.Code(lines=3, language="python", label="Input code", value="def hello_name(name):")
        with gr.Accordion("Advanced settings", open=False):
            server = gr.Radio(
                ["Authentic confidential VM server", "Non-confidential VM server"],
                label="Test connections to secure and insecure servers",
            )
        trigger = gr.Button(label="Run query")
        with gr.Column():
            output = gr.Textbox(placeholder="Output", label="Output")
        trigger.click(fn=run_query, inputs=[server, prompt], outputs=output)
    # TODO(review): the original HTML contact content was truncated in the
    # recovered source ('value="' cut mid-string) — restore it from the repo.
    gr.HTML(label="Contact", value="")