from interpreter import interpreter  # open-interpreter
import streamlit as st

# Ask the interpreter a question; chat() blocks until the turn finishes and
# returns the conversation messages.
output = interpreter.chat("hi, how are you")

# Show the raw response in the Streamlit app.
st.write(output)
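# A minimal sketch of friendlier rendering (assumption: recent open-interpreter
# versions return a list of message dicts with "role" and "content" keys;
# adjust if your version returns plain text).
if isinstance(output, list):
    for message in output:
        if isinstance(message, dict):
            with st.chat_message(message.get("role", "assistant")):
                st.write(str(message.get("content", "")))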





# import subprocess

# def run_terminal_command(command):
#     try:
#         # Run the terminal command and capture its output
#         output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
#         return output.decode("utf-8")  # Decode bytes to string
#     except subprocess.CalledProcessError as e:
#         # Handle errors if the command fails
#         return f"Error: {e.output.decode('utf-8')}"

# # Example command: list files in the current directory
# command = "ls"
# output = run_terminal_command(command)
# print(output)
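
# # A safer variant of the same idea (a sketch, not wired up above): pass the
# # command as an argument list so no shell is spawned, which avoids shell
# # injection with untrusted input.
# result = subprocess.run(["ls", "-l"], capture_output=True, text=True)
# print(result.stdout if result.returncode == 0 else result.stderr)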









# import streamlit as st
# import torch
# from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
# from huggingface_hub import hf_hub_download
# from safetensors.torch import load_file



# # Model Path/Repo Information
# base = "stabilityai/stable-diffusion-xl-base-1.0"
# repo = "ByteDance/SDXL-Lightning"
# ckpt = "sdxl_lightning_4step_unet.safetensors" 

# # Load model (Executed only once for efficiency)
# @st.cache_resource
# def load_sdxl_pipeline():
#     unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cpu", torch.float32)
#     unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))
#     pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float32, variant="fp16").to("cpu")
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
#     return pipe


# # Streamlit UI
# st.title("Image Generation")
# prompt = st.text_input("Enter your image prompt:")

# if st.button("Generate Image"):
#     if not prompt:
#         st.warning("Please enter a prompt.")
#     else:
#         pipe = load_sdxl_pipeline()  # Load the pipeline from cache
#         with torch.no_grad():
#             # SDXL-Lightning's 4-step UNet is distilled for exactly 4
#             # inference steps with guidance disabled (per the model card).
#             image = pipe(prompt, num_inference_steps=4, guidance_scale=0).images[0]

#         st.image(image)
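
# # On a CUDA machine the same pipeline is usually run in fp16 instead (a
# # sketch mirroring the SDXL-Lightning model card, untested here):
# # unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
# # pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")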





# import json
# import streamlit as st
# import google.generativeai as genai

# GOOGLE_API_KEY = ""
# genai.configure(api_key=GOOGLE_API_KEY)
# model = genai.GenerativeModel('gemini-pro')

# def add_to_json(goal):
#     try:
#         with open("test.json", "r") as file:
#             data = json.load(file)
#     except FileNotFoundError:
#         data = {"goals": []}  # Create the file with an empty 'goals' list if it doesn't exist

#     new_item = {"Goal": goal}
#     data["goals"].append(new_item)

#     with open("test.json", "w") as file:
#         json.dump(data, file, indent=4)
        


# def main():
#     if prompt := st.chat_input("Hi, how can I help you?"):
#         goals_prompt = f"""Act as a personal assistant... {prompt} """  
#         completion = model.generate_content(goals_prompt)
#         add_to_json(prompt)

#         with st.chat_message("Assistant"):
#             st.write(completion.text)

        

#     # Display JSON Data
#     if st.button("Show JSON Data"):
#         try:
#             with open("test.json", "r") as file:
#                 data = json.load(file)
#             st.json(data)  # Streamlit's way to display JSON
#         except FileNotFoundError:
#             st.info("No goals saved yet.")


# if __name__ == "__main__":
#     main()
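
# # To try one of the commented experiments, uncomment it (one at a time) and
# # relaunch the app with: streamlit run <this_file>.py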