import gradio as gr
import requests
import os
import PIL
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

## Bloom
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Complete the sentence below in a fun way.
prompt4 = """Distracted from: hubble
by: james webb
Distracted from: homework
by: side project
Distracted from: goals
by: new goals
Distracted from: """

prompt5 = """Distracted from: homework
by: side project
Distracted from: goals
by: new goals
Distracted from: working hard
by: hardly working
Distracted from: twitter
by: open in browser
Distracted from: """

"""Distracted from: homework
by: side project
Distracted from: goals
by: new goals
Distracted from: working hard
by: hardly working
Distracted from: twitter
by: open in browser
Distracted from: code
by: blog post
Distracted from: code
by: blog post
Distracted from:"""

#prompt = """Distracted from: homework\nby: side project\nDistracted from: goals\nby: new goals\nDistracted from: working hard\nby: hardly working\nDistracted from: twitter\nby: open in browser\nDistracted from:"""


# Draw the generated "distracted from / by" phrases onto the meme template image
# and build the next prompt from the last eight lines of the model output.
def write_on_image(final_solution):
    print("************ Inside write_on_image ***********")
    image_path0 = "./distracted0.jpg"
    image0 = Image.open(image_path0)
    I1 = ImageDraw.Draw(image0)
    myfont = ImageFont.truetype('./font1.ttf', 30)

    prompt_list = final_solution.split('\n')
    girlfriend = prompt_list[8].split(':')[1].strip()
    print(f"girlfriend is : {girlfriend}")
    new_girl = prompt_list[9].split(':')[1].strip()
    print(f"new_girl is : {new_girl}")

    prompt_list.pop(0)
    prompt_list.pop(0)
    prompt_list = prompt_list[:8]
    prompt_list.append('Distracted from:')
    print(f"prompt list is : {prompt_list}")
    new_prompt = '\n'.join(prompt_list)
    print(f"final_solution is : {new_prompt}")

    # Write the captions onto the three figures in the meme.
    I1.text((613, 89), girlfriend, font=myfont, fill=(255, 255, 255))
    I1.text((371, 223), "ME", font=myfont, fill=(255, 255, 255))
    I1.text((142, 336), new_girl, font=myfont, fill=(255, 255, 255))
    return image0, new_prompt


# Query the Bloom Inference API with the prompt and render the completion onto the meme.
def meme_generate(img, prompt, temp, top_p):  #prompt, generated_txt): #, input_prompt_sql ): #, input_prompt_dalle2):
    print(f"*****Inside meme_generate - Prompt is :{prompt}")
    if len(prompt) == 0:
        prompt = """Distracted from: homework\nby: side project\nDistracted from: goals\nby: new goals\nDistracted from: working hard\nby: hardly working\nDistracted from: twitter\nby: open in browser\nDistracted from:"""

    json_ = {
        "inputs": prompt,
        "parameters": {
            #"top_p": 0.95,
            "top_p": top_p,  #0.90,
            #"top_k": 0,
            "max_new_tokens": 250,
            "temperature": temp,  #1.1,
            #"num_return_sequences": 3,
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")

    meme_image, new_prompt = write_on_image(solution)
    return meme_image, new_prompt  #final_solution #display_output, new_prompt #generated_txt+prompt


demo = gr.Blocks()

with demo:
    gr.Markdown("