minar09 committed
Commit 0e9c7a5 · verified · 1 Parent(s): fdcf572

Create app.py

Files changed (1)
  1. app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
+ import logging
+ import os
+
+ import gradio as gr
+ from colorama import init, Fore, Style
+
+ from Self_Improving_Search import EnhancedSelfImprovingSearch
+ from llm_config import get_llm_config
+ from llm_response_parser import UltimateLLMResponseParser
+ from llm_wrapper import LLMWrapper
+
+ # Initialize colorama for cross-platform color support
+ init()
+
+ # Set up logging to a file under logs/
+ log_directory = 'logs'
+ if not os.path.exists(log_directory):
+     os.makedirs(log_directory)
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+ log_file = os.path.join(log_directory, 'web_llm.log')
+ file_handler = logging.FileHandler(log_file)
+ formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+ file_handler.setFormatter(formatter)
+ logger.addHandler(file_handler)
+
+ # Initialize components
+ parser = UltimateLLMResponseParser()
+ SYSTEM_PROMPT = """You are an AI assistant capable of web searching and providing informative responses.
+ When a user's query starts with '/', interpret it as a request to search the web and formulate an appropriate search query.
+ ALWAYS follow the prompts provided throughout the searching process EXACTLY as indicated.
+ NEVER assume new instructions other than when directly prompted. DO NOT SELF-PROMPT, PROVIDE MULTIPLE ANSWERS, OR ATTEMPT MULTIPLE RESPONSES FOR ONE PROMPT!
+ """
+
+ def initialize_llm():
+     """Create the LLM wrapper, returning None if initialization fails."""
+     try:
+         print(Fore.YELLOW + "Initializing LLM..." + Style.RESET_ALL)
+         llm_wrapper = LLMWrapper()
+         print(Fore.GREEN + "LLM initialized successfully." + Style.RESET_ALL)
+         return llm_wrapper
+     except Exception as e:
+         logger.error(f"Error initializing LLM: {str(e)}", exc_info=True)
+         return None
+
+ def get_llm_response(llm, prompt):
+     """Generate a single LLM reply using generation settings from llm_config."""
+     try:
+         full_prompt = f"{SYSTEM_PROMPT}\n\nUser: {prompt}\nAssistant:"
+         llm_config = get_llm_config()
+         generate_kwargs = {
+             'max_tokens': llm_config.get('max_tokens', 1024),
+             'stop': llm_config.get('stop', None),
+             'temperature': llm_config.get('temperature', 0.7),
+             'top_p': llm_config.get('top_p', 1.0),
+             'top_k': llm_config.get('top_k', 0),
+             'repeat_penalty': llm_config.get('repeat_penalty', 1.0),
+         }
+         response_text = llm.generate(full_prompt, **generate_kwargs)
+         return response_text
+     except Exception as e:
+         logger.error(f"Error getting LLM response: {str(e)}", exc_info=True)
+         return "Sorry, I encountered an error while processing your request. Please check the log file for details."
+
+ def handle_user_input(user_input, history):
+     """Route a message to the web-search pipeline ('/' prefix) or a direct LLM reply."""
+     if user_input.lower().strip() == 'quit':
+         return "Goodbye!", history
+
+     # Initialize the LLM on first use and cache it on the function object
+     if not hasattr(handle_user_input, "llm"):
+         handle_user_input.llm = initialize_llm()
+     if handle_user_input.llm is None:
+         return "Failed to initialize LLM.", history
+
+     if user_input.startswith('/'):
+         search_query = user_input[1:].strip()
+         search = EnhancedSelfImprovingSearch(llm=handle_user_input.llm, parser=parser)
+         try:
+             answer = search.search_and_improve(search_query)
+             history.append((user_input, answer))
+             return answer, history
+         except Exception as e:
+             logger.error(f"Error during web search: {str(e)}", exc_info=True)
+             return "I encountered an error while performing the web search.", history
+     else:
+         response = get_llm_response(handle_user_input.llm, user_input)
+         history.append((user_input, response))
+         return response, history
+
+ # Define the Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("""
+ # 🌐 Web-LLM Assistant 🤖
+ Welcome to the Web-LLM Assistant! This chatbot can respond to your queries and perform web searches when prompted with a `/`.
+ - For normal interaction, type your message and press Enter.
+ - To request a web search, start your message with `/`. Example: `/latest news on AI advancements`
+ - Type `quit` to exit.
+ """)
+
+     chatbot = gr.Chatbot(label="Web-LLM Assistant")
+     user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+     submit_button = gr.Button("Submit")
+     clear_button = gr.Button("Clear Chat")
+
+     state = gr.State([])  # Store chat history as a list of (user, bot) tuples
+
+     def update_chat(user_message, history):
+         # Update the chatbot display and stored history, then clear the input box
+         bot_response, updated_history = handle_user_input(user_message, history)
+         return updated_history, updated_history, ""
+
+     submit_button.click(
+         update_chat,
+         inputs=[user_input, state],
+         outputs=[chatbot, state, user_input]
+     )
+
+     clear_button.click(
+         lambda: ([], []),  # Clear both the displayed chat and the stored history
+         outputs=[chatbot, state]
+     )
+
+ # Launch the Gradio app
+ demo.launch()
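
Note: the welcome text tells users they can press Enter to send a message, but only the Submit button is wired to `update_chat`. A minimal sketch of how the Enter key could be bound to the same handler via the textbox's `submit` event; this wiring is an illustration added here, not part of the commit, and it assumes placement inside the same `with gr.Blocks() as demo:` block after `update_chat` is defined:

    # Hypothetical addition: let Enter in the textbox trigger the same update as the Submit button
    user_input.submit(
        update_chat,
        inputs=[user_input, state],
        outputs=[chatbot, state, user_input]
    )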