import os

import gradio as gr
import openai
import requests
from PIL import Image
import re

from src.application.url_reader import URLReader

OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
SEARCH_ENGINE_ID = os.getenv('SEARCH_ENGINE_ID')
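# The keys above come from environment variables (os.getenv returns None when a
# variable is unset), so export OPENAI_API_KEY before launching. GOOGLE_API_KEY and
# SEARCH_ENGINE_ID are not used in this file and are presumably consumed elsewhere
# (e.g. by the URL reader / search backend).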

def load_url(url):
    """
    Load the article at the given URL and return its title, text, and top image.
    """
    content = URLReader(url)
    image = None
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'}
    try:
        response = requests.get(
            url,
            headers=headers,
            stream=True,
            timeout=10,
        )
        response.raise_for_status()  # Raise an exception for bad status codes

        # Fetch the article's top image, if the reader found one.
        if content.top_image:
            image_response = requests.get(content.top_image, headers=headers, stream=True, timeout=10)
            image_response.raise_for_status()
            try:
                image = Image.open(image_response.raw)
            except Exception as e:
                print(f"Error loading image from {content.top_image}: {e}")

    except (requests.exceptions.RequestException, FileNotFoundError) as e:
        print(f"Error fetching URL or image: {e}")

    return content.title, content.text, image


def replace_terms(text, input_term, destination_term):
    # Replace every literal occurrence of input_term with destination_term.
    # re.escape keeps regex metacharacters in the user-supplied term from being interpreted.
    modified_text = re.sub(re.escape(input_term), destination_term, text)
    return modified_text

def generate_content(model1, model2, title, content):
    # Build the generation context from whatever the user has filled in.
    full_content = ""
    input_type = ""
    if title and content:
        full_content = title + "\n" + content
        input_type = "title and content"
    elif title:
        full_content = title
        input_type = "title"
    elif content:
        full_content = content
        input_type = "content"

    # model2 (image generation) is not wired up yet; only the text model is used here.
    generated = generate_text(model1, full_content, input_type)

    # Assume the model returns the title on the first line and the body after it.
    generated_title, _, generated_body = generated.partition("\n")
    return generated_title.strip(), generated_body.strip()
        
def generate_text(model, full_context, input_type):
    # Generate text using the selected model.
    # Map the dropdown labels to the corresponding OpenAI model identifiers.
    model_ids = {"GPT 4o": "gpt-4o", "GPT 4o-mini": "gpt-4o-mini"}
    model = model_ids.get(model, model)

    if input_type == "":
        prompt = "Generate a random fake news article"
    else:
        prompt = f"Generate a fake news article (title and content) based on the following {input_type}: {full_context}"

    try:
        response = openai.ChatCompletion.create(
            model=model,
            messages=[
                {"role": "user", "content": prompt}
            ],
        )
        return response.choices[0].message.content

    except openai.error.OpenAIError as e:
        print(f"Error interacting with OpenAI API: {e}")
        return "An error occurred while processing your request."

# Define the GUI
with gr.Blocks() as demo:
    gr.Markdown("# Fake News Detection")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Settings")
            gr.Markdown("This tool generates fake news by modifying the content of a given URL.")

            with gr.Accordion("1. Enter a URL"):
                #gr.Markdown("  1. Enter a URL.")
                url_input = gr.Textbox(
                    label="URL",
                    value="https://bbc.com/future/article/20250110-how-often-you-should-wash-your-towels-according-to-science",
                    )
                load_button = gr.Button("Load URL...")
                
            with gr.Accordion("2. Select a content-generation model", open=True):
                with gr.Row():
                    model1_dropdown = gr.Dropdown(choices=["GPT 4o", "GPT 4o-mini"], label="Text-generation model")
                    model2_dropdown = gr.Dropdown(choices=["Dall-e", "Stable Diffusion"], label="Image-generation model")
                generate_button = gr.Button("Random generation...")

            with gr.Accordion("3. Replace any terms", open=True):
                with gr.Row():
                    input_term_box = gr.Textbox(label="Input Term")
                    destination_term_box = gr.Textbox(label="Destination Term")
                replace_button = gr.Button("Replace term...")
            
            process_button = gr.Button("Process")

        with gr.Column(scale=2):
            gr.Markdown("## News contents")
            title_input = gr.Textbox(label="Title", value="")
            with gr.Row():
                image_view = gr.Image(label="Image") 
                content_input = gr.Textbox(label="Content", value="", lines=15)

    

    # Connect events
    load_button.click(
        load_url, 
        inputs=url_input, 
        outputs=[title_input, content_input, image_view]
        )
    replace_button.click(
        replace_terms,
        inputs=[content_input, input_term_box, destination_term_box],
        outputs=content_input
        )
    process_button.click(
        generate_content,
        inputs=[model1_dropdown, model2_dropdown, title_input, content_input],
        outputs=[title_input, content_input]
        )

    #url_input.change(load_image, inputs=url_input, outputs=image_view) 

demo.launch()
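
# Gradio tip: demo.launch(share=True) additionally creates a temporary public link,
# which is useful when the app runs on a remote machine or inside a notebook.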