import gradio as gr
from share_btn import community_icon_html, loading_icon_html, share_js
import re
import os
#hf_token = os.environ.get('HF_TOKEN')
import openai
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
from gradio_client import Client
#client = Client("https://fffiloni-test-llama-api-debug.hf.space/", hf_token=hf_token)
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
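# Note (assumption about the remote Space's API, not stated in this file): the "/clipi2" endpoint
# used below returns an indexable result whose first element is the generated caption text;
# infer() reads it as clipi_result[0].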
def get_text_after_colon(input_text):
    # Find the first occurrence of ":"
    colon_index = input_text.find(":")
    # Check if ":" exists in the input_text
    if colon_index != -1:
        # Extract the text after the colon
        result_text = input_text[colon_index + 1:].strip()
        return result_text
    else:
        # Return the original text if ":" is not found
        return input_text
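
# Illustrative usage of the helper above (the strings are hypothetical, not produced by the app):
#   get_text_after_colon("Story: Once upon a time ...")  ->  "Once upon a time ..."
#   get_text_after_colon("No colon here")                ->  "No colon here"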
def infer(image_input, audience):
    gr.Info('Calling CLIP Interrogator, 이미지를 해석하고 있습니다...')  # "interpreting the image..."
    clipi_result = clipi_client.predict(
        image_input,  # str (filepath or URL to image) in 'parameter_3' Image component
        "best",  # str in 'Select mode' Radio component
        4,  # int | float (numeric value between 2 and 24) in 'best mode max flavors' Slider component
        api_name="/clipi2"
    )
    print(clipi_result)

    llama_q = f"""
I'll give you a simple image caption, please provide a fictional story for a {audience} audience that would fit well with the image. Please be creative, do not worry and only generate a cool fictional story.
Here's the image description:
'{clipi_result[0]}'
한국어로 답변해줘.
"""
    # The last line of the prompt asks ChatGPT to answer in Korean.

    gr.Info('Calling ChatGPT, 이야기를 만들고 있습니다...')  # "generating the story..."
    #result = client.predict(
    #    llama_q,  # str in 'Message' Textbox component
    #    "I2S",
    #    api_name="/predict"
    #)
    chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": llama_q}])
    result = chat_completion.choices[0].message.content

    print(f"ChatGPT result: {result}")

    # Drop any leading "Title:"-style prefix from the model output
    result = get_text_after_colon(result)

    # Split the text into paragraphs based on actual line breaks
    paragraphs = result.split('\n')

    # Join the paragraphs back with an extra empty line between each paragraph
    formatted_text = '\n\n'.join(paragraphs)

    return formatted_text, gr.Group.update(visible=True)
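
# Minimal sketch of calling infer() directly, outside the Gradio UI (assumes OPENAI_API_KEY is set
# and the bundled example image exists; the second return value is only a UI visibility update):
#   story_text, _ = infer("./examples/crabby.png", "Children")
#   print(story_text)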
css="""
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex;
padding-left: 0.5rem !important;
padding-right: 0.5rem !important;
background-color: #000000;
justify-content: center;
align-items: center;
border-radius: 9999px !important;
max-width: 13rem;
}
div#share-btn-container > div {
flex-direction: row;
background: black;
align-items: center;
}
#share-btn-container:hover {
background-color: #060606;
}
#share-btn {
all: initial;
color: #ffffff;
font-weight: 600;
cursor:pointer;
font-family: 'IBM Plex Sans', sans-serif;
margin-left: 0.5rem !important;
padding-top: 0.5rem !important;
padding-bottom: 0.5rem !important;
right:0;
}
#share-btn * {
all: unset;
}
#share-btn-container div:nth-child(-n+2){
width: auto !important;
min-height: 0px !important;
}
#share-btn-container .wrap {
display: none !important;
}
#share-btn-container.hidden {
display: none!important;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Intro copy (Korean): "Upload an image and ChatGPT will write a story in Korean for you.
        # This is https://huggingface.co/spaces/fffiloni/Image-to-Story modified to write in Korean
        # and to use ChatGPT instead of Llama. ChatGPT responses can be slow or fail due to usage limits."
        gr.Markdown(
            """
            <h1 style="text-align: center">Image to Story - Korean</h1>
            <p style="text-align: center">이미지를 업로드하세요, ChatGPT를 통해 한국어로 이야기를 만들어 줍니다!</p>
            <p style="text-align: center">원본 https://huggingface.co/spaces/fffiloni/Image-to-Story 에서 한국어로 글을 작성하게 하고, Llama 를 ChatGPT 로 수정한 것입니다.</p>
            <p style="text-align: center">ChatGPT 응답이 오래 지연되거나 사용제한으로 안될 때가 있습니다.</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="이미지 입력", type="filepath", elem_id="image-in", height=420)  # "Image input"
                audience = gr.Radio(label="대상", choices=["Children", "Adult"], value="Children")  # "Audience"
                submit_btn = gr.Button('글을 만들어 주세요')  # "Write a story, please"
            with gr.Column():
                #caption = gr.Textbox(label="Generated Caption")
                story = gr.Textbox(label="생성된 스토리", elem_id="story")  # "Generated story"
                with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
                    community_icon = gr.HTML(community_icon_html)
                    loading_icon = gr.HTML(loading_icon_html)
                    share_button = gr.Button("Share to community", elem_id="share-btn")

        gr.Examples(examples=[["./examples/crabby.png", "Children"], ["./examples/hopper.jpeg", "Adult"]],
                    fn=infer,
                    inputs=[image_in, audience],
                    outputs=[story, share_group],
                    cache_examples=True
                    )

    submit_btn.click(fn=infer, inputs=[image_in, audience], outputs=[story, share_group])
    share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=12).launch()
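
# To run this file locally (a sketch, assuming it is saved as app.py, that share_btn.py from the
# original Space sits next to it, and that the ./examples images are present):
#   OPENAI_API_KEY=<your key> python app.py
# Gradio then serves the UI on http://127.0.0.1:7860 by default.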