import gradio as gr
from blindbox.requests import SecureSession

# Address (host:port) of the demo inference server running inside a
# confidential VM; queried by run_query below.
DEMO_SERVER = "4.208.9.167:80"

# Markdown shown beneath the description blocks in the UI.
# NOTE(review): the original literal was mangled by whitespace collapse;
# reconstructed here with the visible text — confirm the bullet items
# against the deployed demo.
bullets = """
Features coming soon:
"""
def run_query(server, prompt):
    """Send *prompt* to the SantaCoder demo server over a secure session.

    Parameters
    ----------
    server : str | None
        Radio-button selection; only the confidential-VM option is accepted.
    prompt : str | None
        Code snippet for the model to complete.

    Returns
    -------
    str
        The generated code followed by a success note, or an error message.
    """
    if server == "Non-confidential VM server":
        return "⛔ Error: you can only connect to an application running on a Confidential VM"
    # Attestation policy file verified by SecureSession before any data is sent.
    POLICY = "./cce_policy.txt"
    if prompt is None:  # `is None`, not `== None` (PEP 8)
        return "⛔ Error: please provide input code"
    message = "\n\n✅ Secure query successful"
    try:
        with SecureSession(f"http://{DEMO_SERVER}", POLICY) as secure_session:
            res = secure_session.post(endpoint="/generate", json={"input_text": prompt})
            # The server replies with an escaped JSON-ish payload; keep only the
            # text between the first ':"' and the first blank line, then strip
            # the remaining backslash escapes.
            cleaned = res.text.replace('\\n', '\n').split('\n\n')[0].split(':"')[1]
            cleaned = cleaned.replace('\\', '')
            return cleaned + message
    except Exception as err:
        # Surface any attestation/network/parsing failure to the UI.
        return f"⛔ Query failed!\n{err}"


# --- Gradio UI ------------------------------------------------------------
# NOTE(review): the Markdown string literals below were mangled by whitespace
# collapse in the original; reconstructed from the visible text.
with gr.Blocks(css=".gradio-container {background-color: #20233fff;}") as demo:
    gr.Markdown("# 🎅 SantaCoder with BlindBox: Private Code Generation")
    gr.Markdown(
        "This is our demo for our article on deploying code generation LLM "
        "models with BlindBox. The user input is protected during computation."
    )
    gr.Markdown(
        "You can see how we deployed the model in the integration section "
        "of our documentation!"
    )
    gr.Markdown(
        "⚠️ BlindBox is still under development. Do not test with production data!"
    )
    gr.Markdown(value=bullets)
    # Three-column layout; only the wide middle column holds the widgets.
    _, column_2, _ = gr.Column(scale=1), gr.Column(scale=6), gr.Column(scale=1)
    with column_2:
        prompt = gr.Code(
            lines=3, language="python", label="Input code",
            value="def hello_name(name):",
        )
        with gr.Accordion("Advanced settings", open=False):
            server = gr.Radio(
                ["Authentic confidential VM server", "Non-confidential VM server"],
                label="Test connections to secure and insecure servers",
            )
        trigger = gr.Button(label="Run query")
        with gr.Column():
            output = gr.Textbox(placeholder="Output", label="Output")
        trigger.click(fn=run_query, inputs=[server, prompt], outputs=output)
    gr.HTML(label="Contact", value="contact")

if __name__ == "__main__":
    demo.launch()