import mysql.connector
from mysql.connector import errorcode
import os
import logging
import time
import hashlib
import datetime
import threading
import csv
import gradio as gr
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Define constants
DEFAULT_FILE_PATH = "scraped_data"
PURPOSE = ("You go to Culver's sites and continuously watch for changes since your last "
           "observation. Anything new gets logged and dumped into CSV, stored in your log "
           "folder at user/app/scraped_data.")
HISTORY = []
CURRENT_TASK = None
# Shared mutable flag so stop_scraping() can signal running monitor threads.
# (A bare bool would be copied when passed to a thread and never see updates.)
STOP_THREADS = [False]

# Define database configuration (read from environment variables)
db_config = {
    'user': os.getenv('DB_USER'),
    'password': os.getenv('DB_PASSWORD'),
    'host': os.getenv('DB_HOST'),
    'raise_on_warnings': True
}


# Initialize the database and create the schema if it does not exist
def initialize_database(config):
    try:
        cnx = mysql.connector.connect(**config)
        cursor = cnx.cursor()
        # Create database if it doesn't exist
        cursor.execute("CREATE DATABASE IF NOT EXISTS scraper_db")
        cnx.database = 'scraper_db'
        # Create tables
        TABLES = {
            'scraped_data': (
                "CREATE TABLE IF NOT EXISTS scraped_data ("
                "  id INT AUTO_INCREMENT PRIMARY KEY,"
                "  url VARCHAR(255) NOT NULL,"
                "  content_hash VARCHAR(64) NOT NULL,"
                "  change_detected DATETIME NOT NULL"
                ") ENGINE=InnoDB"
            )
        }
        for table_name, table_description in TABLES.items():
            try:
                cursor.execute(table_description)
                logging.info(f"Table `{table_name}` created successfully.")
            except mysql.connector.Error as err:
                if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
                    logging.warning(f"Table `{table_name}` already exists.")
                else:
                    logging.error(err.msg)
        cursor.close()
        cnx.close()
        logging.info("Database initialization complete.")
    except mysql.connector.Error as err:
        logging.error(f"Database initialization failed: {err}")


# Start scraping: log an initial observation per URL, then spawn monitor threads
def start_scraping(storage_location, urls, scrape_interval, content_type, db_config):
    global CURRENT_TASK, HISTORY
    CURRENT_TASK = f"Monitoring URLs: {', '.join(urls)}"
    HISTORY.append(f"Task started: {CURRENT_TASK}")
    for url in urls:
        # Create a folder for the URL
        hostname = urlparse(url).hostname
        folder_path = os.path.join(storage_location, hostname)
        os.makedirs(folder_path, exist_ok=True)
        # Log the initial observation
        try:
            with webdriver.Chrome(service=Service(ChromeDriverManager().install()),
                                  options=Options()) as driver:
                driver.get(url)
                time.sleep(2)  # Wait for the page to load
                if content_type == "text":
                    initial_content = driver.page_source
                elif content_type == "media":
                    initial_content = driver.find_elements(By.TAG_NAME, "img")
                else:
                    initial_content = driver.page_source
                initial_hash = hashlib.md5(str(initial_content).encode('utf-8')).hexdigest()
                HISTORY.append(f"Initial observation at {url}: {initial_hash}")
                with open(os.path.join(folder_path, f"{hostname}_initial_observation.txt"), "w") as file:
                    file.write(f"Initial observation at {url}: {initial_hash}")
        except Exception as e:
            HISTORY.append(f"Error accessing {url}: {e}")
        # Start a new thread for monitoring this URL; pass the shared stop flag
        threading.Thread(
            target=monitor_urls,
            args=(storage_location, [url], scrape_interval, content_type, STOP_THREADS, db_config),
            daemon=True,
        ).start()
    return f"Started scraping {', '.join(urls)} every {scrape_interval} minutes."


# Monitor URLs for changes on a fixed interval and log each change to MySQL
def monitor_urls(storage_location, urls, scrape_interval, content_type, stop_scraping_flag, db_config):
    global HISTORY
    previous_hashes = {url: "" for url in urls}
    cnx = None
    cursor = None
    try:
        cnx = mysql.connector.connect(database='scraper_db', **db_config)
        cursor = cnx.cursor()
        with webdriver.Chrome(service=Service(ChromeDriverManager().install()),
                              options=Options()) as driver:
            while not stop_scraping_flag[0]:
                for url in urls:
                    try:
                        driver.get(url)
                        time.sleep(2)  # Wait for the page to load
                        if content_type == "text":
                            current_content = driver.page_source
                        elif content_type == "media":
                            current_content = driver.find_elements(By.TAG_NAME, "img")
                        else:
                            current_content = driver.page_source
                        current_hash = hashlib.md5(str(current_content).encode('utf-8')).hexdigest()
                        if current_hash != previous_hashes[url]:
                            previous_hashes[url] = current_hash
                            date_time = datetime.datetime.now()
                            HISTORY.append(f"Change detected at {url} on {date_time}")
                            # Insert into MySQL
                            add_change = ("INSERT INTO scraped_data "
                                          "(url, content_hash, change_detected) "
                                          "VALUES (%s, %s, %s)")
                            cursor.execute(add_change, (url, current_hash, date_time))
                            cnx.commit()
                            logging.info(f"Change detected and logged for {url} at {date_time}")
                    except Exception as e:
                        logging.error(f"Error accessing {url}: {e}")
                time.sleep(scrape_interval * 60)  # Check every scrape_interval minutes
    except Exception as e:
        logging.error(f"Error in monitor_urls: {e}")
    finally:
        if cursor is not None:
            cursor.close()
        if cnx is not None:
            cnx.close()


# Signal all monitor threads to stop
def stop_scraping():
    STOP_THREADS[0] = True
    return "Scraping stopped."


# Generate a minimal RSS 2.0 feed from the latest changes for a URL
def generate_rss_feed(selected_url, db_config):
    try:
        cnx = mysql.connector.connect(database='scraper_db', **db_config)
        cursor = cnx.cursor(dictionary=True)
        query = ("SELECT content_hash, change_detected FROM scraped_data "
                 "WHERE url = %s ORDER BY change_detected DESC LIMIT 10")
        cursor.execute(query, (selected_url,))
        items = cursor.fetchall()
        rss_items = ""
        for item in items:
            rss_items += f"""
        <item>
            <title>Change Detected</title>
            <link>{selected_url}</link>
            <description>Change detected on {item['change_detected'].strftime('%Y-%m-%d %H:%M:%S')}</description>
            <pubDate>{item['change_detected'].strftime('%a, %d %b %Y %H:%M:%S +0000')}</pubDate>
        </item>"""
        rss_feed = f"""<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
    <channel>
        <title>RSS Feed for {selected_url}</title>
        <link>{selected_url}</link>
        <description>Latest changes detected on {selected_url}.</description>{rss_items}
    </channel>
</rss>"""
        cursor.close()
        cnx.close()
        return rss_feed
    except mysql.connector.Error as err:
        logging.error(f"Error generating RSS feed: {err}")
        return "Failed to generate RSS feed."
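
# display_csv is referenced by the UI below but was missing from the original
# script. A minimal sketch, assuming the change log should be read back from
# the scraped_data table and rendered as CSV text; the column layout mirrors
# the INSERT in monitor_urls.
def display_csv(selected_url):
    try:
        cnx = mysql.connector.connect(database='scraper_db', **db_config)
        cursor = cnx.cursor()
        query = ("SELECT url, content_hash, change_detected FROM scraped_data "
                 "WHERE url = %s ORDER BY change_detected DESC")
        cursor.execute(query, (selected_url,))
        rows = cursor.fetchall()
        cursor.close()
        cnx.close()
        # Render the rows as CSV in memory rather than writing a file
        import io
        buffer = io.StringIO()
        writer = csv.writer(buffer)
        writer.writerow(["url", "content_hash", "change_detected"])
        writer.writerows(rows)
        return buffer.getvalue()
    except mysql.connector.Error as err:
        logging.error(f"Error reading CSV content: {err}")
        return "Failed to load CSV content."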
# Handle chat messages; for now this simply echoes the message back
def handle_message(message, chat_history, system_message, max_tokens, temperature, top_p):
    response = f"Received message: {message}"
    chat_history.append((message, response))
    return chat_history, response


# Define the Gradio interface
def create_interface():
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                message = gr.Textbox(label="Message")
                system_message = gr.Textbox(value="You are a helpful assistant.", label="System message")
                max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
                temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
                storage_location = gr.Textbox(value="scraped_data", label="Storage Location")
                urls = gr.Textbox(label="URLs (comma separated)")
                scrape_interval = gr.Slider(minimum=1, maximum=60, value=5, step=1, label="Scrape Interval (minutes)")
                content_type = gr.Radio(choices=["text", "media", "both"], value="text", label="Content Type")
                start_button = gr.Button("Start Scraping")
                stop_button = gr.Button("Stop Scraping")
                status_output = gr.Textbox(label="Status", interactive=False)
                model_name_input = gr.Textbox(value="default_model", label="Model Name")
                gpu_layers_input = gr.Slider(minimum=0, maximum=8, value=2, step=1, label="GPU Layers")
            with gr.Column():
                chat_history = gr.Chatbot(label="Chat History")
                response_box = gr.Textbox(label="Response")

        # Connect buttons to their respective functions
        start_button.click(
            fn=lambda storage, url_list, interval, ctype: start_scraping(
                storage, [u.strip() for u in url_list.split(",")], interval, ctype, db_config
            ),
            inputs=[storage_location, urls, scrape_interval, content_type],
            outputs=status_output
        )
        stop_button.click(stop_scraping, outputs=status_output)

        # Connect message submission to the chat interface
        message.submit(handle_message,
                       inputs=[message, chat_history, system_message, max_tokens, temperature, top_p],
                       outputs=[chat_history, response_box])

        # Add a button to display the CSV content for a selected URL
        with gr.Row():
            csv_url = gr.Textbox(label="Select URL for CSV Content")
            csv_button = gr.Button("Display CSV Content")
            csv_output = gr.Textbox(label="CSV Content Output", interactive=False)
        csv_button.click(display_csv, inputs=[csv_url], outputs=csv_output)

        # Add a button to display the RSS feed for a selected URL
        with gr.Row():
            rss_url = gr.Textbox(label="Select URL for RSS Feed")
            rss_button = gr.Button("Generate RSS Feed")
            rss_output = gr.Textbox(label="RSS Feed Output", interactive=False)
        rss_button.click(
            generate_rss_feed,
            inputs=[rss_url, gr.State(db_config)],
            outputs=rss_output
        )
    return demo


if __name__ == "__main__":
    # Initialize the database, then launch the Gradio interface
    initialize_database(db_config)
    demo = create_interface()
    demo.launch()
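
# Example environment setup before launching. The variable names match the
# db_config lookup above; the values and the app.py filename are placeholders,
# not part of the original script:
#
#   export DB_USER=scraper
#   export DB_PASSWORD=changeme
#   export DB_HOST=localhost
#   python app.py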