Dataset columns: filename, content (three rows, one per tool module below).
tool_image_analysis.py

import os
import streamlit as st
from pathlib import Path
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
import base64
import requests
# Define the tool name
TOOL_NAME = "Image Analysis"
class ToolClass:
def __init__(self):
"""
Load environment variables, config, set up file paths, etc.
"""
load_dotenv("/workspaces/wakin_bake/.env")
# Hugging Face token from .env
self.hf_token = os.getenv("SAMBANOVA")
if not self.hf_token:
st.error("SAMBANOVA is not set in your .env file. Please add it.")
# Path to the file containing image URLs to analyze
self.image_urls_file = "config/all_image_urls.txt"
# (Change this path if needed.)
# Optionally define or load your model name
self.model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct"
self.max_tokens = 500
# Initialize Hugging Face Inference Client
self.client = InferenceClient(
provider="sambanova",
api_key=self.hf_token
)
############################################################################
# Internal Helper: load image URLs from file
############################################################################
def _load_image_urls(self, file_path: str):
file_path_obj = Path(file_path)
if not file_path_obj.exists():
st.error(f"Image URLs file not found: {file_path_obj}")
return []
urls = []
with file_path_obj.open("r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
urls.append(line)
return urls
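# Expected format of the URLs file: one image URL per line, blank lines ignored.
# Hypothetical example of config/all_image_urls.txt (illustrative URLs only):
#   https://example.com/images/photo-1.png
#   https://example.com/images/photo-2.jpg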
############################################################################
# Internal Helper: load prompt from file
############################################################################
def _load_prompt_from_file(self, prompt_file: str) -> str:
"""Helper to read a prompt from a text file if it exists."""
try:
with open(prompt_file, "r", encoding="utf-8") as f:
return f.read().strip()
except Exception as e:
st.error(f"Error reading prompt file: {e}")
return ""
############################################################################
# Internal Helper: Convert image to Base64
############################################################################
def _image_to_base64(self, image_url: str) -> str:
"""
Convert an image from URL to Base64 format.
"""
try:
# Download the image (with a timeout so a slow host cannot hang the app)
response = requests.get(image_url, timeout=10)
response.raise_for_status()
# Convert to Base64, using the reported content type instead of assuming PNG
content_type = response.headers.get("Content-Type", "image/png")
encoded_image = base64.b64encode(response.content).decode("utf-8")
return f"data:{content_type};base64,{encoded_image}"
except Exception as e:
st.error(f"Error converting image to Base64: {e}")
return ""
############################################################################
# Internal Helper: stream image analysis for a single image
############################################################################
def _stream_image_analysis(self, image_url: str, user_prompt: str) -> str:
"""
Create a chat message with text + image, then stream partial AI response.
Return the final text once streaming is complete.
"""
image_base64 = self._image_to_base64(image_url)
if not image_base64:
return "Error processing image."
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": user_prompt},
{"type": "image_url", "image_url": {"url": image_base64}},
],
}
]
full_response = ""
try:
with st.chat_message("assistant"):
stream = self.client.chat.completions.create(
model=self.model_name,
messages=messages,
max_tokens=self.max_tokens,
stream=True,
)
# Use st.write_stream instead of manual streaming
response = st.write_stream(stream)
full_response = response
# No need to update session_state here to avoid duplication
except Exception as e:
error_msg = f"Error analyzing image {image_url}: {e}"
st.error(error_msg)
return error_msg
return full_response
############################################################################
# PUBLIC: run_pipeline
############################################################################
def run_pipeline(self, user_prompt: str, profile: str = None) -> str:
"""
Public entry point for the "Image Analysis" tool.
1) Reads image URLs from file.
2) For each image, streams partial AI analysis using HF chat endpoint.
3) Displays results in real-time.
"""
# Initialize messages in session state if not already present
if "messages" not in st.session_state:
st.session_state.messages = []
user_prompt = user_prompt.strip()
# Check if user_prompt looks like a .txt file path and load prompt from file
if user_prompt.endswith(".txt") and os.path.exists(user_prompt):
loaded_prompt = self._load_prompt_from_file(user_prompt)
if loaded_prompt:
user_prompt = loaded_prompt
else:
return "Failed to load prompt from file."
if not user_prompt:
return "No prompt provided."
# 1) Load image URLs
image_urls = self._load_image_urls(self.image_urls_file)
if not image_urls:
return "No image URLs found to analyze."
# 2) Analyze each image and stream the response
last_analysis = ""
for image_url in image_urls:
st.markdown(f"### Analyzing Image: {image_url}")
# Add user message to session state before streaming response
st.session_state.messages.append(
{"role": "user", "content": f"Analyze: {image_url}"})
# Get analysis
analysis_text = self._stream_image_analysis(image_url, user_prompt)
# Add assistant response to session state after streaming is complete
st.session_state.messages.append(
{"role": "assistant", "content": analysis_text})
last_analysis = analysis_text
return last_analysis
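############################################################################
# Example usage (illustrative sketch, not part of the original tool)
############################################################################
# Assumes a Streamlit host app, a populated config/all_image_urls.txt, and a
# SAMBANOVA key in .env; kept as comments so importing the module has no side effects.
#
# tool = ToolClass()
# result = tool.run_pipeline("Describe the main subject of each image in two sentences.")
# st.write(result)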
tool_image_generation.py

import os
import re
import streamlit as st
import io
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
from datetime import datetime
import requests
from jinja2 import Template
import cv2
import numpy as np
from PIL import Image
# Define the tool name
TOOL_NAME = "Image Generation"
class ToolClass:
def __init__(self):
load_dotenv("/workspaces/wakin_bake/.env")
self.api_key = os.getenv("SAMBANOVA")
self.api_key1 = os.getenv("FAL")
if not self.api_key or not self.api_key1:
st.error(
"API key not found. Add SAMBANOVA=... and FAL=... to your .env file."
)
st.stop()
self.client_text = InferenceClient(
provider="sambanova",
api_key=self.api_key
)
self.client_image = InferenceClient(
provider="fal-ai",
api_key=self.api_key1
)
self.template_folder = "templates/j2/create_image"
self.output_response_file = "output/image_generation/ai_response.txt"
if "messages" not in st.session_state:
st.session_state.messages = []
if "generation_history" not in st.session_state:
st.session_state.generation_history = []
def sanitize_input(self, text: str) -> str:
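# Replace any run of characters outside printable ASCII (0x20-0x7E) with a single space, then trim.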
sanitized = re.sub(r"[^\x20-\x7E]+", " ", text)
return sanitized.strip()
def load_template(self, template_name: str) -> str:
template_file_path = os.path.join(self.template_folder, template_name)
with open(template_file_path, "r") as file:
return file.read()
def generate_text_response(self, prompt: str) -> str:
messages = [{"role": "user", "content": prompt}]
full_response = ""
try:
stream = self.client_text.chat.completions.create(
model="meta-llama/Llama-3.3-70B-Instruct",
messages=messages,
max_tokens=1500,
stream=True,
)
for chunk in stream:
chunk_text = chunk.choices[0].delta.content or ""
full_response += chunk_text
return full_response
except Exception as e:
st.error(f"Error generating AI response: {e}")
return ""
class ImageGenerator:
def __init__(self, api_key: str):
self.client = InferenceClient(
provider="fal-ai",
api_key=api_key
)
def generate_image(self, prompt: str) -> Image.Image:
# Use the InferenceClient to generate the image
image = self.client.text_to_image(
prompt,
model="black-forest-labs/FLUX.1-dev"
)
return image
def convert_to_coloring_book(self, input_image: Image.Image) -> np.ndarray:
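# Grayscale the image, detect edges with Canny, invert them, then mask a white canvas
# so the detected edges appear as dark outlines on a white background.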
gray_image = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2GRAY)
edges = cv2.Canny(gray_image, threshold1=30, threshold2=100)
inverted_edges = cv2.bitwise_not(edges)
blank_image = np.ones_like(np.array(input_image)) * 255
coloring_book_image = cv2.bitwise_and(
blank_image, blank_image, mask=inverted_edges
)
return coloring_book_image
def run_pipeline(self, scene_input: str, selected_template: str) -> str:
"""Process user input and generate a response."""
if scene_input and selected_template:
template_content = self.load_template(selected_template)
rendered_prompt = Template(template_content).render(user_input=scene_input)
combined_prompt = rendered_prompt
st.session_state.messages.append({"role": "user", "content": scene_input})
st.markdown("### Input Content:")
st.write(combined_prompt)
# Generate AI text response (streaming)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
try:
stream = self.client_text.chat.completions.create(
model="Qwen/Qwen2.5-Coder-32B-Instruct",
messages=[{"role": "user", "content": combined_prompt}],
max_tokens=1500,
stream=True,
)
for chunk in stream:
chunk_text = chunk.choices[0].delta.content or ""
full_response += chunk_text
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append(
{"role": "assistant", "content": full_response}
)
except Exception as e:
st.error(f"Error generating AI response: {e}")
return "Error generating AI response."
# Generate image from AI response
with st.status("Generating image..."):
try:
# Generate the image using the prompt response
image = self.ImageGenerator(self.api_key1).generate_image(full_response)
original_image_path = f"output/images/generated_image_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
os.makedirs("output/images", exist_ok=True)
image.save(original_image_path)
st.image(image, caption="Generated Image", width=300)
# Convert to coloring book style
coloring_book_image = self.convert_to_coloring_book(image)
coloring_book_path = f"output/images/colouring_book/coloring_book_image_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
os.makedirs("output/images/colouring_book", exist_ok=True)
cv2.imwrite(coloring_book_path, coloring_book_image)
st.image(
coloring_book_path, caption="Coloring Book Style", width=300
)
st.session_state.generation_history.append(
{
"timestamp": datetime.now().strftime("%Y%m%d_%H%M%S"),
"prompt": combined_prompt,
"original_image_path": original_image_path,
"coloring_book_image_path": coloring_book_path,
}
)
return "Image generated successfully."
except Exception as e:
st.error(f"Error generating image: {str(e)}")
return "Error generating image."
return "No image generated."
tool_websearch.py

import os
import streamlit as st
import requests
from huggingface_hub import InferenceClient
from duckduckgo_search import DDGS
from bs4 import BeautifulSoup
from pathlib import Path
from dotenv import load_dotenv
import json
##############################################################################
# The name that will appear in the dynamic dropdown
##############################################################################
TOOL_NAME = "Web Search"
##############################################################################
# A class that encapsulates the entire web-search pipeline:
# - Load query prompt from web.txt
# - Generate a refined query with streaming
# - Perform a DuckDuckGo search with the refined query
# - Scrape each search result
# - Load final prompt from final_response_prompt.txt
# - Summarize results with streaming—now including the web results
##############################################################################
class ToolClass:
def __init__(self):
"""
Loads environment configuration, sets up prompt file paths, etc.
"""
load_dotenv("/workspaces/wakin_bake/.env")
# HF API Key
hf_api_key = os.getenv("SAMBANOVA")
if not hf_api_key:
st.error("No SAMBANOVA key found in your .env file. Please set it before using this tool.")
self.inference_client = InferenceClient(
provider="sambanova", api_key=hf_api_key)
# Default streaming settings
self.query_max_tokens = 100
self.final_answer_max_tokens = 50000
# Model names
self.query_generation_model = "Qwen/Qwen2.5-Coder-32B-Instruct"
self.final_summarization_model = "meta-llama/Llama-3.3-70B-Instruct"
# File paths for your prompts
self.query_prompt_file = Path("templates/prompts/web_search/web.txt")
self.final_prompt_file = Path(
"templates/prompts/web_search/final_response_prompt.txt")
############################################################################
# HELPER: Streaming text from HF chat
############################################################################
def _stream_generation(self, messages, model_name, max_tokens) -> str:
"""
Streams partial text from the HF chat endpoint and returns
the final text as a string. Displays partial text in real time.
"""
partial_text = ""
placeholder = st.empty()
try:
stream = self.inference_client.chat.completions.create(
model=model_name,
messages=messages,
max_tokens=max_tokens,
stream=True
)
for chunk in stream:
delta = chunk.choices[0].delta.content
if delta:
partial_text += delta
placeholder.markdown(partial_text + "▌")
placeholder.markdown(partial_text) # Finalize display
except Exception as e:
st.error(f"Error streaming from HF: {e}")
return partial_text.strip()
############################################################################
# HELPER: Scrape a URL and extract text
############################################################################
def _scrape_url(self, url: str, max_chars=5000) -> str:
"""Fetch a webpage, extract text (up to max_chars), or return empty on error."""
try:
resp = requests.get(url, timeout=10)
resp.raise_for_status()
soup = BeautifulSoup(resp.text, "html.parser")
# Remove script and style elements
for script_or_style in soup(["script", "style"]):
script_or_style.decompose()
text = soup.get_text(separator=" ")
text = text.strip()
if len(text) > max_chars:
text = text[:max_chars] + " ...[TRUNCATED]"
return text
except Exception:
return ""
############################################################################
# HELPER: Load a text file (prompt)
############################################################################
def _load_prompt(self, file_path: Path) -> str:
if not file_path.exists():
st.error(f"Prompt file not found: {file_path}")
return ""
try:
with file_path.open("r", encoding="utf-8") as f:
return f.read()
except Exception as e:
st.error(f"Error reading prompt file {file_path}: {e}")
return ""
############################################################################
# MAIN: run_pipeline
############################################################################
def run_pipeline(self, user_text: str) -> str:
"""
1) Load query prompt from web.txt
2) Stream a refined query based on user input
3) Perform a DuckDuckGo search with the refined query
4) Scrape each search result
5) Load final prompt from final_response_prompt.txt
6) Summarize the results with streaming—now including the web results
7) Save the search data to both session state and a JSON file
8) Display search data in the chat interface
9) Return the final answer
"""
# 1) Load the query prompt
query_prompt_text = self._load_prompt(self.query_prompt_file)
if not query_prompt_text:
return "No query prompt loaded. Stopping."
# 2) Generate a refined query from the user input
st.markdown("**Generating refined query from user text...**")
query_messages = [
{"role": "user", "content": f"{query_prompt_text}\n\nUser Input: {user_text}"}
]
refined_query = self._stream_generation(
messages=query_messages,
model_name=self.query_generation_model,
max_tokens=self.query_max_tokens,
)
if not refined_query:
return "No query generated."
# 3) Perform DuckDuckGo search
st.markdown(f"**Performing DuckDuckGo search for:** `{refined_query}`")
results = []
try:
with DDGS() as ddgs:
for r in ddgs.text(refined_query, max_results=5):
results.append(r)
except Exception as e:
st.error(f"Web search error: {e}")
if not results:
return f"Refined Query: **{refined_query}**\n\nNo search results returned."
# 4) Scrape each link
st.markdown("**Scraping each link...**")
for item in results:
url = item.get("href", "")
if not url:
item["scraped_text"] = ""
continue
item["scraped_text"] = self._scrape_url(url)
# Build a text block that includes details of each result
harvested_results = ""
for idx, r in enumerate(results, start=1):
harvested_results += (
f"Result {idx}:\n"
f"Title: {r.get('title', '')}\n"
f"Link: {r.get('href', '')}\n"
f"Snippet: {r.get('body', '')}\n"
f"Scraped Text: {r.get('scraped_text', '')}\n\n"
)
# 5) Load the final summarization prompt
final_prompt_text = self._load_prompt(self.final_prompt_file)
if not final_prompt_text:
return (
f"Refined Query: **{refined_query}**\n\nNo final prompt loaded. Stopping."
)
# 6) Generate final answer with the web search results included
st.markdown("**Generating final answer from search results...**")
try:
formatted_prompt = final_prompt_text.format(user_input=user_text)
except Exception:
formatted_prompt = final_prompt_text
# Append the refined query and the harvested web results to the final prompt
final_content = (
formatted_prompt +
"\n\nRefined Query:\n" + refined_query +
"\n\nWeb Search Results:\n" + harvested_results
)
summarization_messages = [
{"role": "user", "content": final_content}
]
final_answer = self._stream_generation(
messages=summarization_messages,
model_name=self.final_summarization_model,
max_tokens=self.final_answer_max_tokens,
)
# 7) Save search data for later reference in both session state and a JSON file
save_data = {
"user_text": user_text,
"refined_query": refined_query,
"final_summary": final_answer,
}
if "search_results" not in st.session_state:
st.session_state["search_results"] = []
st.session_state["search_results"].append(save_data)
save_path = "output/web_search/search_results.json"
os.makedirs(os.path.dirname(save_path), exist_ok=True)
try:
if os.path.exists(save_path):
with open(save_path, "r", encoding="utf-8") as f:
existing_data = json.load(f)
else:
existing_data = []
existing_data.append(save_data)
with open(save_path, "w", encoding="utf-8") as f:
json.dump(existing_data, f, indent=4)
st.success(f"Saved search data to {save_path} and session state.")
except Exception as e:
st.error(f"Error saving search data: {e}")
# 8) Save scraped results to a separate file
scraped_save_path = "output/web_search/scraped_results.txt"
try:
with open(scraped_save_path, "a", encoding="utf-8") as f:
for item in results:
f.write(f"Title: {item.get('title', '')}\n")
f.write(f"Link: {item.get('href', '')}\n")
f.write(
f"Scraped Text: {item.get('scraped_text', '')}\n\n")
st.success(f"Saved scraped results to {scraped_save_path}")
except Exception as e:
st.error(f"Error saving scraped results: {e}")
# 9) Display the search data in an expander within the chat interface
with st.expander("View Search Data"):
st.markdown("### Latest Search Data")
st.markdown(f"**User Input:** {user_text}")
st.markdown(f"**Refined Query:** {refined_query}")
st.markdown("**Final Summary:**")
st.write(final_answer)
st.markdown("**Full Session State Data:**")
st.json(st.session_state["search_results"])
# 10) Return the final answer
return final_answer or "No final answer generated."
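############################################################################
# Example usage (illustrative sketch, not part of the original tool)
############################################################################
# Assumes a Streamlit host app that owns the chat UI and a SAMBANOVA key in .env.
#
# tool = ToolClass()
# answer = tool.run_pipeline("What are the latest developments in small language models?")
# st.write(answer)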