import csv
import datetime
import hashlib
import logging
import os
import threading
import time

import gradio as gr
import yaml
from huggingface_hub import InferenceClient
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Define constants
DATE_TIME_STR = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
PURPOSE = "You visit Culver's sites and continuously check them for changes since your last observation. Anything new is logged and appended to a CSV file stored in the log folder at user/app/scraped_data."
HISTORY = []
CURRENT_TASK = None
DEFAULT_FILE_PATH = "user/app/scraped_data/culver/culvers_changes.csv"

# Ensure the directory exists
os.makedirs(os.path.dirname(DEFAULT_FILE_PATH), exist_ok=True)

# Function to monitor URLs for changes
def monitor_urls(storage_location, urls, scrape_interval, content_type):
    global HISTORY
    previous_hashes = [""] * len(urls)

    options = Options()
    options.add_argument("--headless")  # assumes no display is available, as on a typical server

    try:
        with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options) as driver:
            while True:
                for i, url in enumerate(urls):
                    try:
                        driver.get(url)
                        time.sleep(2)  # Wait for the page to load
                        if content_type == "text":
                            current_content = driver.page_source
                        elif content_type == "media":
                            # Hash the image sources rather than the WebElement objects:
                            # element reprs embed session ids that change on every load,
                            # which would flag a spurious change on every poll.
                            current_content = [img.get_attribute("src") for img in driver.find_elements(By.TAG_NAME, "img")]
                        else:  # "both" (or anything else) falls back to the full page source
                            current_content = driver.page_source
                        current_hash = hashlib.md5(str(current_content).encode("utf-8")).hexdigest()
                        if current_hash != previous_hashes[i]:
                            previous_hashes[i] = current_hash
                            date_time_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            HISTORY.append(f"Change detected at {url} on {date_time_str}")
                            write_header = not os.path.exists(storage_location)
                            with open(storage_location, "a", newline="") as csvfile:
                                csv_writer = csv.DictWriter(csvfile, fieldnames=["date", "time", "url", "change"])
                                if write_header:
                                    csv_writer.writeheader()
                                csv_writer.writerow({"date": date_time_str.split()[0], "time": date_time_str.split()[1], "url": url, "change": "Content changed"})
                            logging.info(f"Change detected at {url} on {date_time_str}")
                    except Exception as e:
                        logging.error(f"Error accessing {url}: {e}")
                time.sleep(scrape_interval * 60)  # Wait scrape_interval minutes between passes
    except Exception as e:
        logging.error(f"Error starting ChromeDriver: {e}")

# Define main function to handle user input
def handle_input(storage_location, urls, scrape_interval, content_type):
    global CURRENT_TASK, HISTORY

    CURRENT_TASK = f"Monitoring URLs: {', '.join(urls)}"
    HISTORY.append(f"Task started: {CURRENT_TASK}")
    # monitor_urls loops forever, so run it on a daemon thread instead of
    # blocking the caller; this keeps the Gradio UI responsive.
    threading.Thread(
        target=monitor_urls,
        args=(storage_location, urls, scrape_interval, content_type),
        daemon=True,
    ).start()
    return CURRENT_TASK

# Load custom prompts
try:
    with open("custom_prompts.yaml", "r") as fp:
        custom_prompts = yaml.safe_load(fp)
except FileNotFoundError:
    custom_prompts = {"WEB_DEV": "", "AI_SYSTEM_PROMPT": "", "PYTHON_CODE_DEV": "", "CODE_GENERATION": "", "CODE_INTERPRETATION": "", "CODE_TRANSLATION": "", "CODE_IMPLEMENTATION": ""}

# Define agents
AGENTS = ["WEB_DEV", "AI_SYSTEM_PROMPT", "PYTHON_CODE_DEV", "CODE_GENERATION", "CODE_INTERPRETATION", "CODE_TRANSLATION", "CODE_IMPLEMENTATION"]

# Define the Mistral inference client
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
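
# Minimal `generate` helper used by `respond` below: a sketch assuming
# huggingface_hub's InferenceClient.chat_completion API and a chat history
# of (user, assistant) tuples; adapt if your history is stored differently.
def generate(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    result = client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return result.choices[0].message.content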

# Define the chat response function
def respond(message, history, system_message, max_tokens, temperature, top_p):
    return generate(message, history, system_message, max_tokens, temperature, top_p)

def start_scraping(storage_location, url1, url2, url3, url4, url5, url6, url7, url8, url9, url10, scrape_interval, content_type):
    urls = [url for url in [url1, url2, url3, url4, url5, url6, url7, url8, url9, url10] if url]
    if not urls:
        return "No URLs provided."
    # handle_input spawns the monitor thread and returns immediately,
    # so scraping runs in the background while the UI stays usable.
    handle_input(storage_location, urls, scrape_interval, content_type)
    return f"Started scraping {', '.join(urls)} every {scrape_interval} minutes."

# Function to display CSV content
def display_csv(storage_location):
    if os.path.exists(storage_location):
        with open(storage_location, "r") as file:
            return file.read()
    else:
        return "No data available."

# Create Gradio interface
def chat_interface(message, system_message, max_tokens, temperature, top_p, storage_location, url1, url2, url3, url4, url5, url6, url7, url8, url9, url10, scrape_interval, content_type):
    global HISTORY
    # HISTORY mixes monitor log strings with (user, bot) chat tuples; keep
    # only the tuples for the chat transcript shown in the Chatbot.
    chat_pairs = [entry for entry in HISTORY if isinstance(entry, tuple)]
    response = respond(message, chat_pairs, system_message, max_tokens, temperature, top_p)
    HISTORY.append((message, response))
    return chat_pairs + [(message, response)], response

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            message = gr.Textbox(label="Message")
            system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
            max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
            temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
            storage_location = gr.Textbox(value=DEFAULT_FILE_PATH, label="Storage Location")
            url1 = gr.Textbox(value="https://www.culver.k12.in/", label="URL 1")
            url2 = gr.Textbox(value="https://www.facebook.com/CulverCommunitySchools", label="URL 2")
            url3 = gr.Textbox(label="URL 3")
            url4 = gr.Textbox(label="URL 4")
            url5 = gr.Textbox(label="URL 5")
            url6 = gr.Textbox(label="URL 6")
            url7 = gr.Textbox(label="URL 7")
            url8 = gr.Textbox(label="URL 8")
            url9 = gr.Textbox(label="URL 9")
            url10 = gr.Textbox(label="URL 10")
            scrape_interval = gr.Slider(minimum=1, maximum=60, value=5, step=1, label="Scrape Interval (minutes)")
            content_type = gr.Radio(choices=["text", "media", "both"], value="text", label="Content Type")
            start_button = gr.Button("Start Scraping")
            csv_output = gr.Textbox(label="CSV Output", interactive=False)
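            # Optional helper (an addition, not part of the original layout):
            # a button that reuses the display_csv function to show logged changes.
            view_csv_button = gr.Button("View CSV")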
        
        with gr.Column():
            chat_history = gr.Chatbot(label="Chat History")
            response_box = gr.Textbox(label="Response")
    
    start_button.click(start_scraping, inputs=[storage_location, url1, url2, url3, url4, url5, url6, url7, url8, url9, url10, scrape_interval, content_type], outputs=csv_output)
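    # Wire the optional View CSV button (see above) to the existing display_csv helper.
    view_csv_button.click(display_csv, inputs=[storage_location], outputs=csv_output)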
    message.submit(chat_interface, inputs=[message, system_message, max_tokens, temperature, top_p, storage_location, url1, url2, url3, url4, url5, url6, url7, url8, url9, url10, scrape_interval, content_type], outputs=[chat_history, response_box])

if __name__ == "__main__":
    demo.launch()