File size: 2,397 Bytes
e80fad1
 
 
 
 
 
 
4bda9c3
 
e80fad1
4bda9c3
 
e80fad1
4bda9c3
e80fad1
 
 
 
 
4bda9c3
 
e80fad1
 
 
 
 
 
 
 
4bda9c3
e80fad1
 
 
 
 
 
 
4bda9c3
 
 
 
 
e80fad1
4bda9c3
 
 
e80fad1
4bda9c3
 
 
 
e80fad1
4bda9c3
 
 
 
e80fad1
4bda9c3
 
e80fad1
4bda9c3
 
e80fad1
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import warnings
warnings.filterwarnings("ignore")
## import necessary packages

import os
from typing import Any, Union,Dict, List
import numpy as np
import io
import base64
import requests
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
from helper import *
from fastapi import FastAPI
 
# Create a new FastAPI app instance
app = FastAPI()

# Load the TF-Hub arbitrary-image-stylization model once at module import
# time so every request reuses the same loaded module (first run downloads
# the model over the network; TF-Hub caches it afterwards).
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)


 
# Define a function to handle the GET request at `/generate`.
# The generate() function takes a dict of string features, fetches the
# content image at features['url'] (and an optional style image at
# features['style_url']), stylizes the content with the TF-Hub model, and
# returns a JSON response with the base64-encoded PNG under the key "output".
@app.get("/generate")
def generate(features: Dict[str, str]):
    """
    Stylize a content image with an arbitrary style image.

    Uses the magenta `arbitrary-image-stylization-v1-256` model from
    TensorFlow Hub (loaded once at startup as ``hub_module``).

    Parameters
    ----------
    features : Dict[str, str]
        Must contain ``'url'`` — the URL of the content image.
        May contain ``'style_url'`` — the URL of the style image;
        defaults to Edvard Munch's "The Scream" when absent.

    Returns
    -------
    dict
        ``{"output": <base64-encoded PNG of the stylized image>}``

    Raises
    ------
    KeyError
        If ``'url'`` is missing from ``features``.
    """
    # Default style image: Edvard Munch, "The Scream".
    default_style_url = 'https://upload.wikimedia.org/wikipedia/commons/c/c5/Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg'

    content_image_url = features['url']
    # Idiomatic optional lookup instead of the `in`-check/else branch.
    style_image_url = features.get('style_url', default_style_url)

    # Target sizes the images are resized to before stylization.
    content_img_size = (500, 500)
    style_img_size = (300, 300)

    style_image = load_image(style_image_url, style_img_size)
    content_image = load_image(content_image_url, content_img_size)
    # Lightly smooth the style image with a 3x3 average pool
    # before feeding it to the model.
    style_image = tf.nn.avg_pool(
        style_image, ksize=[3, 3], strides=[1, 1], padding='SAME')

    # Stylize the content image with the given style image.
    outputs = hub_module(tf.constant(content_image),
                         tf.constant(style_image))
    stylized_image = outputs[0]

    # Model output is float in [0, 1]; scale to uint8, encode as PNG,
    # then base64-encode the PNG bytes for the JSON payload.
    img = Image.fromarray(np.uint8(stylized_image.numpy()[0] * 255))
    im_file = io.BytesIO()
    img.save(im_file, format="PNG")
    im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")
    # Return the base64-encoded image in a JSON response.
    return {"output": im_bytes}