File size: 5,168 Bytes
04a0510
380991e
 
 
 
04a0510
fd6c6c7
380991e
 
fd6c6c7
0401525
 
 
380991e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0401525
380991e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0401525
a3fb953
714c540
 
 
 
 
 
 
0401525
714c540
 
 
 
 
 
a3fb953
714c540
 
 
 
a3fb953
714c540
0401525
 
20d2697
585a4e3
0401525
 
 
714c540
0401525
 
 
 
 
380991e
 
 
 
04a0510
 
0401525
20d2697
0401525
20d2697
0401525
 
 
 
 
 
 
714c540
 
 
 
 
 
 
 
0401525
714c540
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import gradio as gr
import os
import json
from bs4 import BeautifulSoup
import requests
from huggingface_hub import InferenceClient

# Define global variables
# Avatar image shown next to the bot's messages in the Gradio chat UI.
BOT_AVATAR = 'https://automatedstockmining.org/wp-content/uploads/2024/08/south-west-value-mining-logo.webp'
# Hugging Face API token read from the environment; None if HF_TOKEN is unset.
hf_token = os.getenv("HF_TOKEN")

# Shared Inference API client used for all chat completions in this app.
client = InferenceClient(token=hf_token)

# Custom CSS for the Gradio app (font, header, chatbot card, button styling).
# NOTE(review): defined here but not passed to any Gradio component below — confirm intent.
custom_css = '''
.gradio-container {
    font-family: 'Roboto', sans-serif;
}
.main-header {
    text-align: center;
    color: #4a4a4a;
    margin-bottom: 2rem;
}
.tab-header {
    font-size: 1.2rem;
    font-weight: bold;
    margin-bottom: 1rem;
}
.custom-chatbot {
    border-radius: 10px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.custom-button {
    background-color: #3498db;
    color: white;
    border: none;
    padding: 10px 20px;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}
.custom-button:hover {
    background-color: #2980b9;
}
'''

def extract_text_from_webpage(html):
    """Return the human-visible text of an HTML document.

    Script and style elements are removed before extraction so their
    contents do not leak into the returned text.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Drop elements whose contents are never rendered as text.
    for tag in soup.find_all(["script", "style"]):
        tag.decompose()
    return soup.get_text(separator=" ", strip=True)

def search(query):
    """Run a Google search for *query* and scrape the text of each result page.

    Returns a list of dicts with keys "link" and "text"; "text" is ``None``
    when a result page could not be fetched. Returns an empty list when the
    search request itself fails.
    """
    # Cap scraped text per page so the downstream LLM prompt stays bounded.
    max_chars_per_page = 8000
    all_results = []

    with requests.Session() as session:
        try:
            resp = session.get(
                url="https://www.google.com/search",
                headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
                params={"q": query, "num": 4},
                timeout=5
            )
            resp.raise_for_status()

            soup = BeautifulSoup(resp.text, "html.parser")
            # Google marks organic results with class "g" in its plain-HTML view.
            result_block = soup.find_all("div", attrs={"class": "g"})

            seen = set()  # avoid fetching the same URL twice
            for result in result_block:
                anchor = result.find("a", href=True)
                if not anchor:
                    continue
                link = anchor["href"]
                # Google sometimes emits relative redirect hrefs (e.g. "/url?q=...");
                # only absolute http(s) URLs can be fetched directly.
                if not link.startswith(("http://", "https://")) or link in seen:
                    continue
                seen.add(link)
                try:
                    webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0"}, timeout=5)
                    webpage.raise_for_status()

                    visible_text = extract_text_from_webpage(webpage.text)
                    # Truncate rather than drop over-long pages.
                    all_results.append({"link": link, "text": visible_text[:max_chars_per_page]})

                except requests.exceptions.RequestException as e:
                    # Best-effort: record the failed link so the caller still sees it.
                    print(f"Failed to retrieve {link}: {e}")
                    all_results.append({"link": link, "text": None})
        except requests.exceptions.RequestException as e:
            print(f"Google search failed: {e}")

    return all_results

def process_query(user_input, history):
    """Answer *user_input* using live web results; yields the growing reply.

    Two LLM round-trips: first derive a Google search term from the request
    and chat *history*, then answer the request using the scraped results.
    The final answer is streamed chunk-by-chunk via ``yield`` so the Gradio
    chat UI can render it incrementally.
    """
    # Round 1: ask the model for a search term, streamed token-by-token.
    query_stream = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"Based on this chat history {history} and the user's request '{user_input}', suggest a Google search term in a single line without assuming any specific dates (use 'this year', 'this month', etc.)"}],
        max_tokens=400,
        stream=True
    )

    # Collect the streamed tokens into the final search term.
    search_term = ""
    for part in query_stream:
        search_term += part.choices[0].delta.content or ''

    print("Search Query:", search_term)  # Debugging: Check the final search term

    # Scrape the web and serialize results for inclusion in the prompt.
    results_payload = json.dumps(search(search_term))

    # Round 2: answer the user's request with the search results as context.
    answer_stream = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"YOU ARE IM.S, AN INVESTMENT CHATBOT BUILT BY automatedstockmining.org. Answer the user's request '{user_input}' using the following information: {results_payload} and the chat history {history}. Provide a concise, direct answer in no more than 2-3 sentences, with appropriate emojis. If the user asks for a detailed analysis, generate up to 3000 tokens analyzing all trends and patterns."}],
        max_tokens=3000,
        stream=True
    )

    # Stream the accumulated answer so the UI updates in real time.
    answer = ""
    for part in answer_stream:
        answer += part.choices[0].delta.content or ''
        yield answer

# Gradio theme: blue primary accents on a slate neutral palette.
theme = gr.themes.Citrus(
    primary_hue="blue",
    neutral_hue="slate",
)

# Clickable starter prompts shown beneath the chat input.
examples = [
    ["What's the current price of bitcoin"],
    ["What's the latest news on Cisco Systems stock"],
    # More examples...
]

# Chat widget: default user avatar, custom bot avatar, copy-to-clipboard enabled.
chatbot = gr.Chatbot(
    label="IM.S",
    avatar_images=[None, BOT_AVATAR],
    show_copy_button=True,
    layout="panel",
    height=700
)

# Wire the streaming handler into a ChatInterface; process_query is a
# generator, so replies render incrementally.
chat_interface = gr.ChatInterface(
    theme=theme,
    fn=process_query,
    chatbot=chatbot,
    examples=examples,
)

# Start the Gradio server (blocks until shutdown).
chat_interface.launch()