import streamlit as st
import numpy as np
import requests
from io import BytesIO
from PIL import Image

def main():
    st.title("Haltmann Diffusion Algorithm [C] - 2022-20XX - This is just the gui")

    slider = st.slider("Slider", 0, 255, 128)  # min=0, max=255, default=128

    # Get user input via a text box - this will be the URL of the image to edit.
    url = st.text_input("Enter URL of image to edit")

    # Load the image from the URL using Pillow. We need BytesIO because
    # Pillow expects a file-like object rather than a URL string.
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))

    # Resize the image so it's not giant - makes everything run faster!
    img = img.resize((600, 400))

    # Convert the image to grayscale for simpler processing. You can skip
    # this step if you want color halftoning - it tends to work better on
    # images that are already low contrast (like screenshots), and grayscale
    # conversion can introduce artifacts such as banding or posterization.
    # If you do skip it, change all references below from 'gray_img' to
    # 'img', including the final paste back into the result.
    gray_img = img.convert('L')

    # On more recent Pillow versions (>= 6?) you can instead use
    # .convert('LA'), which produces grayscale plus an 8-bit alpha channel
    # in one step - handy for alpha compositing with the dithered PNGs
    # later on, and it avoids keeping separate grayscale and RGB copies in
    # memory:
    #     gray_img = img.convert('LA')
    #
    # [Update 12/2019]: Per
    # https://github.com/python-pillow/Pillow/issues/3973#issuecomment-529083824
    # (thanks @jeremycole), you can also convert straight to 1-bit mode,
    # which sets the image up for 1-bit dithering with no further
    # conversions - switch all later references from 'gray_img' to
    # 'onebit_image' if you use this path:
    #     onebit_image = gray_img.convert('1')
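
# For reference, a minimal sketch of the 1-bit dithering path described in
# the comments above. Assumptions: Pillow is installed, "input.jpg" and
# "dithered.png" are placeholder paths, and dither_demo is illustrative
# only. Image.convert('1') applies Floyd-Steinberg dithering by default.
def dither_demo():
    from PIL import Image
    img = Image.open("input.jpg").resize((600, 400))
    gray_img = img.convert('L')            # 8-bit grayscale
    onebit_image = gray_img.convert('1')   # 1-bit, Floyd-Steinberg dithered
    onebit_image.save("dithered.png")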
 

import streamlit as st
from PIL import Image
import numpy as np

st.text("This app generates VQ-GAN prompts for generating inpaintings.")
st.text("To use this app, simply enter the desired text prompt and hit generate.")


# st.cache ensures this function runs only once per input per session, so we
# don't regenerate prompts unnecessarily - a new prompt is computed only when
# the user enters new text. Caching is an important concept in Streamlit
# apps: it makes them more efficient by avoiding unnecessary computation.
@st.cache(allow_output_mutation=True)
def generate_prompt(text):
    # Takes the user's text prompt (a string) and returns the generated
    # VQ-GAN prompt (another string). Called when the user hits "Generate".
    return "Enter text here " + text + " and hit generate!"


# Start by writing an empty string - it is replaced with the generated
# prompt when the user hits "Generate".
st.write(generate_prompt(""))
 
import cv2
import numpy as np


def inpaint(img, mask):
    """Inpaints the given image using the given mask.

    Args:
        img: The image to inpaint. Must be a 3-channel RGB image.
        mask: The inpainting mask. Must be a binary 3-channel image
            with 1s indicating the area to inpaint and 0s indicating
            the area to leave unchanged.

    Returns:
        The inpainted image as a 3-channel RGB numpy array.
    """
    # Minimal body sketch using OpenCV's TELEA inpainting (the same call
    # the later version of this file uses). cv2.inpaint expects an 8-bit
    # image and a single-channel 8-bit mask, so collapse the binary
    # 3-channel mask to one channel and scale it to 0/255.
    img = np.asarray(img, dtype=np.uint8)
    mask_1ch = (np.asarray(mask)[:, :, 0] > 0).astype(np.uint8) * 255
    return cv2.inpaint(img, mask_1ch, 3, cv2.INPAINT_TELEA)
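
# Hypothetical usage of inpaint() above: remove a 64x64 square from the
# centre of an image. "photo.jpg" is a placeholder path and inpaint_demo
# is illustrative only.
def inpaint_demo():
    from PIL import Image
    img = np.array(Image.open("photo.jpg").convert("RGB"))
    mask = np.zeros_like(img)
    h, w = img.shape[:2]
    mask[h // 2 - 32:h // 2 + 32, w // 2 - 32:w // 2 + 32] = 1  # 1 = inpaint here
    result = inpaint(img, mask)
    Image.fromarray(result).show()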
## V0.2
import streamlit as st
import numpy as np
from PIL import Image
import requests
import io


st.set_option('deprecation.showfileUploaderEncoding', False)


@st.cache(allow_output_mutation=True)
def load_image(img):
    im = Image.open(img)
    return im


def main():
    st.title("Dall-E Patrya")
    st.markdown("Create images from textual descriptions with Dall-PFT!")

    uploaded_file = st.file_uploader("Choose an image", type="jpg")
    if uploaded_file is not None:
        # Show the uploaded image once the user has chosen one.
        st.image(load_image(uploaded_file))

    st.button("Edit Photo")


if __name__ == "__main__":
    main()

import streamlit as st
# NOTE: `vqgan` is a placeholder module here, not a published package -
# substitute whichever VQGAN wrapper you actually use.
import vqgan

st.title("VQGAN Inpainting App")
st.markdown("This app uses a pre-trained VQGAN model to inpaint images.")


@st.cache(allow_output_mutation=True)
def load_model():
    # Load the pre-trained VQGAN model (cached so it loads once per session).
    return vqgan.load_model('vqgan')


def inpaint(img, model, coords):
    # Inpaint the selected region of the image using the VQGAN model.
    inpainted = vqgan.inpaint(img, model, coords)
    return inpainted
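
# Hypothetical wiring for the functions above. The coordinate format
# (x0, y0, x1, y1) and run_app are assumptions - adapt them to whatever
# your vqgan.inpaint actually expects.
def run_app():
    from PIL import Image
    uploaded = st.file_uploader("Choose an image", type=["jpg", "png"])
    if uploaded is not None:
        img = Image.open(uploaded)
        st.image(img, caption="Original")
        model = load_model()
        coords = (100, 100, 200, 200)  # placeholder region to inpaint
        st.image(inpaint(img, model, coords), caption="Inpainted")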
## [C] Haltmann Earth Division [C] - 20XX
import streamlit as st
from PIL import Image, ImageOps
import numpy as np
import cv2


def inpaint(image, mask):
    """Inpaints the given image using the mask."""

    # cv2.inpaint expects 8-bit input, so convert the image and mask to
    # uint8 before calling it - other dtypes are rejected or produce a
    # distorted output.
    img = np.uint8(image)
    mask = np.uint8(mask)

    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
    return dst
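
# Design note: OpenCV ships two inpainting algorithms - cv2.INPAINT_TELEA
# (Telea's fast-marching method, used above) and cv2.INPAINT_NS
# (Navier-Stokes based). The third argument is the inpainting radius in
# pixels around each point. Swapping the flag is the only change needed
# to compare the two:
#
#     dst_ns = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)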