AMfeta99 committed on
Commit
111b74e
·
verified ·
1 Parent(s): a4e2a10

Delete app_test.py

Browse files
Files changed (1) hide show
  1. app_test.py +0 -129
app_test.py DELETED
@@ -1,129 +0,0 @@
1
- from huggingface_hub import InferenceClient
2
- from langchain_community.tools import DuckDuckGoSearchResults
3
- from langchain.agents import create_react_agent, AgentExecutor
4
- from langchain_core.tools import BaseTool
5
- from pydantic import Field
6
- from PIL import Image, ImageDraw, ImageFont
7
- from functools import lru_cache
8
- import gradio as gr
9
- from io import BytesIO
10
- import os
11
-
12
- # === Setup Inference Clients ===
13
- # Use your Hugging Face token if necessary:
14
- # client = InferenceClient(repo_id="model", token="YOUR_HF_TOKEN")
15
-
16
- image_client = InferenceClient("m-ric/text-to-image")
17
- text_client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
18
-
19
# === LangChain wrapper using InferenceClient for text generation ===
class InferenceClientLLM(BaseTool):
    """LangChain tool that generates text through the HF Inference API.

    Wraps a shared module-level ``InferenceClient`` (``text_client``);
    excluded from pydantic serialization via ``Field(exclude=True)``.
    """

    name: str = "inference_text_generator"
    description: str = "Generate text using HF Inference API."
    client: InferenceClient = Field(default=text_client, exclude=True)

    def _run(self, prompt: str) -> str:
        """Return generated text for *prompt* (synchronous)."""
        print(f"[LLM] Generating text for prompt: {prompt}")
        response = self.client.text_generation(prompt)
        # BUG FIX: huggingface_hub's text_generation returns a plain str by
        # default, so the previous `response.get("generated_text", "")` raised
        # AttributeError. Handle str, dict, and detail-object returns.
        if isinstance(response, str):
            return response
        if isinstance(response, dict):
            return response.get("generated_text", "")
        return getattr(response, "generated_text", "")

    def _arun(self, prompt: str):
        """Async execution is intentionally unsupported."""
        raise NotImplementedError("Async not supported.")
33
-
34
# === Image generation tool ===
class TextToImageTool(BaseTool):
    """LangChain tool that renders an image from a text prompt.

    Wraps the shared module-level ``image_client``; excluded from pydantic
    serialization via ``Field(exclude=True)``.
    """

    name: str = "text_to_image"
    description: str = "Generate an image from a text prompt."
    client: InferenceClient = Field(default=image_client, exclude=True)

    def _run(self, prompt: str) -> Image.Image:
        """Return a PIL image generated for *prompt* (synchronous)."""
        print(f"[Image Tool] Generating image for prompt: {prompt}")
        result = self.client.text_to_image(prompt)
        # BUG FIX: recent huggingface_hub versions return a PIL.Image directly,
        # so Image.open(BytesIO(result)) on it failed. Support both the modern
        # PIL return and the legacy raw-bytes return.
        if isinstance(result, Image.Image):
            return result
        return Image.open(BytesIO(result))

    def _arun(self, prompt: str):
        """Async execution is intentionally unsupported."""
        raise NotImplementedError("Async not supported.")
47
-
48
# === Initialize tools ===
text_to_image_tool = TextToImageTool()
text_gen_tool = InferenceClientLLM()
search_tool = DuckDuckGoSearchResults()

# === Create agent ===
# NOTE(review): create_react_agent normally expects an LLM/Runnable plus a
# prompt; here a BaseTool is passed as llm= and no prompt is given — this
# likely fails at runtime. Verify against the installed langchain version.
agent = create_react_agent(llm=text_gen_tool, tools=[text_to_image_tool, search_tool])
agent_executor = AgentExecutor(agent=agent, tools=[text_to_image_tool, search_tool], verbose=True)
56
-
57
# === Image labeling ===
def add_label_to_image(image, label):
    """Draw *label* on a dark box in the bottom-right corner of *image*.

    Mutates and returns the same PIL image object.
    """
    draw = ImageDraw.Draw(image)
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    try:
        font = ImageFont.truetype(font_path, 30)
    except OSError:  # narrowed from bare except: font file missing on host
        font = ImageFont.load_default()

    # BUG FIX: ImageDraw.textsize was removed in Pillow 10; textbbox is the
    # supported replacement. Keep a fallback for very old Pillow versions.
    try:
        left, top, right, bottom = draw.textbbox((0, 0), label, font=font)
        text_width, text_height = right - left, bottom - top
    except AttributeError:
        text_width, text_height = draw.textsize(label, font=font)

    position = (image.width - text_width - 20, image.height - text_height - 20)
    rect_position = [position[0] - 10, position[1] - 10,
                     position[0] + text_width + 10, position[1] + text_height + 10]
    # NOTE(review): the alpha in (0, 0, 0, 128) only takes effect on RGBA
    # images; on RGB it renders as solid black — confirm intended look.
    draw.rectangle(rect_position, fill=(0, 0, 0, 128))
    draw.text(position, label, fill="white", font=font)
    return image
72
-
73
# === Prompt generation with caching ===
@lru_cache(maxsize=128)
def generate_prompts_for_object(object_name):
    """Return the past/present/future image prompts for *object_name*.

    Results are memoized per object name via lru_cache.
    """
    templates = {
        "past": "Show an old version of a {0} from its early days.",
        "present": "Show a {0} with current features/design/technology.",
        "future": "Show a futuristic version of a {0}, predicting future features/designs.",
    }
    return {period: text.format(object_name) for period, text in templates.items()}
81
-
82
# === Cache generated images ===
@lru_cache(maxsize=64)
def generate_image_for_prompt(prompt, label):
    """Generate one labeled image for (prompt, label), memoizing the result."""
    return add_label_to_image(text_to_image_tool._run(prompt), label)
87
-
88
# === Main generation function ===
def generate_object_history(object_name: str):
    """Create labeled past/present/future images of *object_name* plus a GIF.

    Returns a tuple: (list of (file_path, label) pairs, path to animated GIF).
    Side effect: writes PNGs and a GIF under /tmp.
    """
    prompts = generate_prompts_for_object(object_name)
    images = []
    file_paths = []

    for period, prompt in prompts.items():
        caption = f"{object_name} - {period.capitalize()}"
        labeled_image = generate_image_for_prompt(prompt, caption)

        out_path = f"/tmp/{object_name}_{period}.png"
        labeled_image.save(out_path)
        images.append((out_path, caption))
        file_paths.append(out_path)

    # Create GIF: one second per frame, looping forever.
    gif_path = f"/tmp/{object_name}_evolution.gif"
    frames = [Image.open(path) for path in file_paths]
    frames[0].save(gif_path, save_all=True, append_images=frames[1:], duration=1000, loop=0)

    return images, gif_path
109
-
110
# === Gradio UI ===
def create_gradio_interface():
    """Assemble and return the Gradio Blocks UI for the evolution visualizer."""
    with gr.Blocks() as demo:
        gr.Markdown("# TimeMetamorphy: Evolution Visualizer")

        with gr.Row():
            with gr.Column():
                object_input = gr.Textbox(label="Enter Object (e.g., car, phone)")
                generate_button = gr.Button("Generate Evolution")
                # BUG FIX: Component.style() was removed in Gradio 4; the grid
                # layout is now the `columns` constructor argument.
                gallery = gr.Gallery(label="Generated Images", columns=3)
                gif_display = gr.Image(label="Generated GIF")

        generate_button.click(fn=generate_object_history, inputs=object_input, outputs=[gallery, gif_display])

    return demo
125
-
126
# === Launch app ===
if __name__ == "__main__":
    # Build the UI and expose it with a public share link.
    app = create_gradio_interface()
    app.launch(share=True)