gperdrizet committed
Commit e1759ca · unverified · Parent(s): 4b5afa5 da09bef

Merge pull request #12 from gperdrizet/dev

assets/html.py CHANGED
@@ -3,18 +3,58 @@
 TITLE = (
     '''
     <center>
-    <h1>Agentic RSS reader</h1>
+    <h1>RASS (retrieval augmented simple syndication) Agent</h1>
+    <h2>Agentic RSS feed reader</h2>
     </center>
     '''
 )
 
 DESCRIPTION = (
     '''
-    <p>Uses sister Space
+    <p><b>Problem</b>: I love RSS feeds, but I need help keeping up with all of the
+    content from my subscriptions.
+
+    <b>Solution</b>: Build a tool that allows LLMs to find and interact with RSS feeds
+    on behalf of the user.</p>
+    <h2>Introduction</h2>
+    <p>This demonstration uses the sister Space
     <a href='https://huggingface.co/spaces/Agents-MCP-Hackathon/rss-mcp-server'>
-    RSS feed reader</a> via MCP. Click 'Connect to MCP server' to get started.
-    Check out the <a href='https://github.com/gperdrizet/MCP-hackathon/tree/main'>
+    RSS feed reader</a> via MCP to interact with RSS feeds. Click 'Connect to MCP
+    server' to get started. If it takes a minute or two to reply, don't worry: the
+    inference container was probably cold and spinning up. Check out the
+    <a href='https://github.com/gperdrizet/MCP-hackathon/tree/main'>
     main project repo on GitHub</a>. Both Spaces by
     <a href='https://www.linkedin.com/in/gperdrizet/'>George Perdrizet</a>.</p>
+
+    I love RSS feeds - they remind me of a time when the internet was a weird and
+    wonderful place, filled with interesting content hiding behind every link. The
+    tools to produce and navigate that content have improved by leaps and bounds.
+    However, the improvement has not come without some losses. Content often feels
+    homogeneous, and it is too often painfully apparent that your favorite platform
+    has a large degree of control over what content you see and what content you don't.
+
+    This tool gives the user back some of that control. It lets them decide which
+    content and sources they are interested in. I built it because I want access to
+    diverse, unfiltered publishing by many sources, paired with modern AI to help me
+    navigate it. I want the model to help me ingest my feed, not create it for me!
     '''
 )
+
+FEATURES_TOOLS = '''
+## Features
+
+1. Inference with Anthropic's efficient claude-3-haiku model.
+2. Custom MCP client with asynchronous server-sent events, retry and error handling,
+   based on the excellent repo by [Adel Zaalouk](https://github.com/zanetworker/mcp-playground/tree/main).
+3. Multi-turn re-prompting to allow LLM workflows with multiple tool calls.
+4. Queue and worker system to show the user what's going on 'under the hood' while
+   the model calls tools and generates replies.
+
+## Tools
+
+1. `get_feed()`: Given a website name or URL, finds its RSS feed and returns recent
+   article titles, links, and a generated summary of the content if available. Caches
+   results for fast retrieval by other tools. Embeds content into a vector database
+   for subsequent RAG.
+2. `context_search()`: Vector search on article content for RAG context.
+3. `find_article()`: Uses vector search on article content to find the title of the
+   article the user is referring to.
+4. `get_summary()`: Gets an article summary from the Redis cache using the article title.
+5. `get_link()`: Gets an article link from the Redis cache using the article title.
+'''
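
For orientation, these constants are plain strings consumed by the Gradio UI; the actual wiring appears in the `rss_client.py` diff further down. A minimal sketch of that usage:

```python
# Minimal sketch: how the constants defined in assets/html.py are rendered.
# Mirrors the gr.HTML/gr.Markdown wiring shown in the rss_client.py diff below.
import gradio as gr

from assets import html

with gr.Blocks(title='RASS agent') as demo:
    gr.HTML(html.TITLE)               # centered <h1>/<h2> banner
    gr.Markdown(html.DESCRIPTION)     # problem/solution and introduction
    gr.Markdown(html.FEATURES_TOOLS)  # features and tool list

if __name__ == '__main__':
    demo.launch()
```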
client/interface.py CHANGED
@@ -1,14 +1,13 @@
 '''Functions for controlling chat flow between Gradio and Anthropic/MCP'''
 
-import json
 import logging
 import queue
-from anthropic.types import text_block
 from gradio.components.chatbot import ChatMessage
 
 from client import prompts
 from client.anthropic_bridge import AnthropicBridge
 import client.gradio_functions as gradio_funcs
+import client.tool_workflows as tool_funcs
 
 # Create dialog logger
 dialog = gradio_funcs.get_dialog_logger(clear = True)
@@ -26,6 +25,13 @@ async def agent_input(
     reply = 'No reply from LLM'
 
     user_query = chat_history[-1]['content']
+
+    if len(chat_history) > 1:
+        prior_reply = chat_history[-2]['content']
+
+    else:
+        prior_reply = ''
+
     dialog.info('User: %s', user_query)
 
     input_messages = format_chat_history(chat_history)
@@ -34,70 +40,29 @@ async def agent_input(
         input_messages
     )
 
-    logger.debug(result)
-
     if result['tool_result']:
-        tool_call = result['tool_call']
-        tool_name = tool_call['name']
-
-        if tool_name == 'rss_mcp_server_get_feed':
-
-            tool_parameters = tool_call['parameters']
-            website = tool_parameters['website']
-            response_content = result['llm_response'].content[0]
-
-            if isinstance(response_content, text_block.TextBlock):
-                intermediate_reply = response_content.text
-            else:
-                intermediate_reply = f'I Will check the {website} RSS feed for you'
-
-            output_queue.put(intermediate_reply)
-            dialog.info('LLM: %s', intermediate_reply)
-            dialog.info('LLM: called %s on %s', tool_name, website)
-
-            articles = json.loads(result['tool_result'].content)['text']
-
-            prompt = prompts.GET_FEED_PROMPT.substitute(
-                website=website,
-                user_query=user_query,
-                intermediate_reply=intermediate_reply,
-                articles=articles
-            )
-
-            input_message =[{
-                'role': 'user',
-                'content': prompt
-            }]
-
-            dialog.info('System: re-prompting LLM with return from %s call', tool_name)
-            dialog.info('New prompt: %s ...', prompt[:75])
-
-            logger.info('Re-prompting input %s', input_message)
-            result = await bridge.process_query(
-                prompts.GET_FEED_SYSTEM_PROMPT,
-                input_message
-            )
-
-            try:
-
-                reply = result['llm_response'].content[0].text
-
-            except (IndexError, AttributeError):
-                reply = 'No final reply from model'
-
-            logger.info('LLM final reply: %s', reply)
+        logger.info('LLM called tool, entering tool loop.')
+        await tool_funcs.tool_loop(
+            user_query,
+            prior_reply,
+            result,
+            bridge,
+            output_queue,
+            dialog
+        )
 
     else:
+        logger.info('LLM replied directly.')
+
         try:
             reply = result['llm_response'].content[0].text
 
         except AttributeError:
             reply = 'Bad reply - could not parse'
 
-        logger.info('Direct, no-tool reply: %s', reply)
+        logger.info('Reply: %s', reply)
+        output_queue.put(reply)
 
-    dialog.info('LLM: %s ...', reply[:75])
-    output_queue.put(reply)
     output_queue.put('bot-finished')
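`agent_input` now communicates with the UI only through `output_queue`: replies are pushed as plain strings, and a `'bot-finished'` sentinel marks the end of a turn. A minimal sketch of the consuming side (the function name here is illustrative, not from the repo):

```python
# Illustrative consumer for the output_queue contract used by agent_input:
# plain strings until the 'bot-finished' sentinel ends the turn.
import queue


def drain_replies(output_queue: queue.Queue) -> list[str]:
    '''Collects one turn's worth of replies from the worker.'''

    replies = []

    while True:
        message = output_queue.get()   # blocks until the worker produces output

        if message == 'bot-finished':  # sentinel pushed after the final reply
            break

        replies.append(message)

    return replies
```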
client/mcp_client.py CHANGED
@@ -71,7 +71,7 @@ class MCPTimeoutError(Exception):
 class MCPClientWrapper:
     '''Main client wrapper class for interacting with Model Context Protocol (MCP) endpoints'''
 
-    def __init__(self, endpoint: str, timeout: float = 30.0, max_retries: int = 3):
+    def __init__(self, endpoint: str, timeout: float = 360.0, max_retries: int = 3):
         '''Initialize MCP client with endpoint URL
 
         Args:
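
The default timeout jumps from 30 s to 360 s, which matches the description's warning that a cold inference container can take a minute or two to spin up. Assuming the constructor above, usage looks like this (the override values are illustrative):

```python
# Sketch: the wrapper now tolerates long cold starts by default (360 s),
# and both timeout and max_retries can still be overridden per instance.
from client.mcp_client import MCPClientWrapper

client = MCPClientWrapper(
    'https://agents-mcp-hackathon-rss-mcp-server.hf.space/gradio_api/mcp/sse'
)  # timeout defaults to 360.0 seconds

patient_client = MCPClientWrapper(
    'https://agents-mcp-hackathon-rss-mcp-server.hf.space/gradio_api/mcp/sse',
    timeout=600.0,   # illustrative override
    max_retries=5
)
```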
client/prompts.py CHANGED
@@ -5,18 +5,88 @@ from string import Template
 
 DEFAULT_SYSTEM_PROMPT = 'You are a helpful tool-using assistant.'
 
-GET_FEED_SYSTEM_PROMPT = '''You are a helpful assistant. Your job is to facilitate interactions between Human users and LLM agents.'''
+REPROMPTING_SYSTEM_PROMPT = '''
+You are a helpful assistant. Your job is to facilitate interactions between a Human
+user and an LLM agent. To complete the user's request or answer their question, you may
+need to call multiple functions sequentially and use each output to formulate the next
+function call until you arrive at the final answer. But if you can satisfy the request
+with a single function call, you should do so.
+
+Here is an example exchange between the user and agent using multiple function calls:
+
+user: Can you give me a link to the article about the FAA modernizing air traffic control technology?
+
+agent: OK, let me find the article you are referring to.
+
+function call: find_article("FAA modernizing air traffic control technology")
+
+function return: {"title": "FAA To Eliminate Floppy Disks Used In Air Traffic Control Systems"}
+
+function call: get_link("FAA To Eliminate Floppy Disks Used In Air Traffic Control Systems")
+
+function return: {"link": "https://www.tomshardware.com/the-faa-seeks-to-eliminate-floppy-disk-usage-in-air-traffic-control-systems"}
+
+assistant: Here is the link to the article: [FAA To Eliminate Floppy Disks Used In Air Traffic Control Systems](https://www.tomshardware.com/the-faa-seeks-to-eliminate-floppy-disk-usage-in-air-traffic-control-systems)
+'''
 
 GET_FEED_PROMPT = Template(
-    '''Below is an exchange between a user and an agent. The user has asked the agent to get new content from the $website RSS feed. In order to complete the request, the agent has called a function which returned the RSS feed content from $website in JSON format. Your job is to complete the exchange by using the returned JSON RSS feed data to write a human readable reply to the user.
+    '''Below is an exchange between a user and an agent. The user has asked the agent
+    to get new content from the $website RSS feed. In order to complete the request,
+    the agent has called a function which returned the RSS feed content from $website
+    in JSON format. Your job is to complete the exchange by using the returned JSON
+    RSS feed data to write a human readable reply to the user.
 
     user: $user_query
 
    agent: $intermediate_reply
 
-    function call: get_feed_content($website)
+    function call: get_feed_content("$website")
 
     function return: $articles
 
    assistant:'''
)
+
+OTHER_TOOL_PROMPT = Template(
+    '''Below is an exchange between a user and an agent. The user has asked the agent
+    "$user_query". The agent is completing the user's request by calling a function or
+    functions. Complete the exchange by either:
+
+    1. Calling the next function needed to get the information necessary to generate a
+    final answer for the user.
+    2. Generating the final answer if you have enough information to do so already.
+
+    If no more information is needed to generate the final answer, do so without calling
+    additional tools.
+
+    agent: $prior_reply
+
+    user: $user_query
+
+    agent: $intermediate_reply
+
+    function call: $tool_name($tool_parameters)
+
+    function return: $tool_result
+    '''
+)
+
+
+'''Here is an example exchange between the user and agent using a single function call:
+
+user: Give me a summary of the article "Apple announces Foundation Models and
+Containerization frameworks"?
+
+agent: OK, I will summarize the article.
+
+function call: get_summary("Apple announces Foundation Models and Containerization frameworks")
+
+function return: {"summary": "Apple announced new technologies and enhancements to its
+developer tools to help create more beautiful, intelligent, and engaging app experiences
+across Apple platforms, including a new software design and access to on-device Apple
+Intelligence and large language models."}
+
+assistant: Apple announced new technologies and enhancements to its developer tools to
+help create more beautiful, intelligent, and engaging app experiences across Apple
+platforms, including a new software design and access to on-device Apple Intelligence
+and large language models.'''
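
Both prompts are `string.Template` instances, so their `$`-placeholders are filled with `substitute()`, as `client/tool_workflows.py` does below. A small sketch with made-up values:

```python
# Sketch: filling OTHER_TOOL_PROMPT's $-placeholders via string.Template.
# All values below are invented for illustration; Template.substitute()
# stringifies non-string values such as the parameter dict.
from client import prompts

prompt = prompts.OTHER_TOOL_PROMPT.substitute(
    user_query='Can you link me to that FAA article?',
    prior_reply='There are three new posts on Tom\'s Hardware.',
    intermediate_reply='I will find the title of that article.',
    tool_name='rss_mcp_server_find_article',
    tool_parameters={'query': 'FAA air traffic control'},
    tool_result='{"title": "FAA To Eliminate Floppy Disks Used In Air Traffic Control Systems"}'
)

print(prompt)
```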
client/tool_workflows.py ADDED
@@ -0,0 +1,215 @@
+'''Functions to handle re-prompting and final reply generation
+downstream of LLM tool calls.'''
+
+import json
+import logging
+import queue
+from anthropic.types import text_block
+from client import prompts
+from client.anthropic_bridge import AnthropicBridge
+
+INTERMEDIATE_REPLY_HINTS = {
+    'rss_mcp_server_context_search': 'Let me find some additional context before I generate a final answer.',
+    'rss_mcp_server_find_article': 'I will find the title of that article.',
+    'rss_mcp_server_get_summary': 'I will summarize that article.',
+    'rss_mcp_server_get_link': 'I will get the link to that article.'
+}
+
+
+async def tool_loop(
+    user_query: str,
+    prior_reply: str,
+    result: dict,
+    bridge: AnthropicBridge,
+    output_queue: queue.Queue,
+    dialog: logging.Logger
+) -> None:
+
+    '''Re-prompts the LLM in a loop until it generates a final reply based on tool output.
+
+    Args:
+        user_query: the original user input that provoked the tool call
+        prior_reply: the assistant's previous reply, for conversational context
+        result: the complete model reply containing the tool call
+        bridge: AnthropicBridge class instance
+        output_queue: queue to send results back to Gradio UI
+        dialog: logger instance to record intermediate responses and internal dialog
+    '''
+
+    tool_call = result['tool_call']
+    tool_name = tool_call['name']
+
+    if tool_name == 'rss_mcp_server_get_feed':
+        await get_feed_call(
+            user_query,
+            result,
+            bridge,
+            output_queue,
+            dialog
+        )
+
+    else:
+        tool_parameters = tool_call['parameters']
+        response_content = result['llm_response'].content[0]
+
+        if isinstance(response_content, text_block.TextBlock):
+            intermediate_reply = response_content.text
+        else:
+            intermediate_reply = INTERMEDIATE_REPLY_HINTS[tool_name]
+
+        dialog.info('LLM intermediate reply: %s', intermediate_reply)
+        dialog.info('MCP: called %s', tool_name)
+
+        tool_result = json.loads(result['tool_result'].content)['text']
+
+        prompt = prompts.OTHER_TOOL_PROMPT.substitute(
+            user_query=user_query,
+            prior_reply=prior_reply,
+            intermediate_reply=intermediate_reply,
+            tool_name=tool_name,
+            tool_parameters=tool_parameters,
+            tool_result=tool_result
+        )
+
+        dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
+        while True:
+
+            reply = await other_call(
+                prompt,
+                bridge,
+                dialog
+            )
+
+            if 'final reply' in reply:
+                final_reply = reply['final reply']
+                dialog.info('LLM final reply: %s ...', final_reply[:50])
+                output_queue.put(final_reply)
+                break
+
+            else:
+                prompt = reply['new_prompt']
+
+
+async def get_feed_call(
+    user_query: str,
+    result: dict,
+    bridge: AnthropicBridge,
+    output_queue: queue.Queue,
+    dialog: logging.Logger
+) -> None:
+
+    '''Re-prompts the LLM after a call to get_feed() and sends the final
+    reply to the Gradio UI via the output queue.
+
+    Args:
+        user_query: the original user input that provoked the tool call
+        result: the complete model reply containing the tool call
+        bridge: AnthropicBridge class instance
+        output_queue: queue to send results back to Gradio UI
+        dialog: logger instance to record intermediate responses and internal dialog
+    '''
+
+    tool_call = result['tool_call']
+    tool_name = tool_call['name']
+    tool_parameters = tool_call['parameters']
+    website = tool_parameters['website']
+    response_content = result['llm_response'].content[0]
+
+    if isinstance(response_content, text_block.TextBlock):
+        intermediate_reply = response_content.text
+    else:
+        intermediate_reply = f'I will check the {website} RSS feed for you'
+
+    dialog.info('LLM intermediate reply: %s', intermediate_reply)
+    dialog.info('MCP: called %s on %s', tool_name, website)
+
+    articles = json.loads(result['tool_result'].content)['text']
+
+    prompt = prompts.GET_FEED_PROMPT.substitute(
+        website=website,
+        user_query=user_query,
+        intermediate_reply=intermediate_reply,
+        articles=articles
+    )
+
+    input_message = [{
+        'role': 'user',
+        'content': prompt
+    }]
+
+    dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
+    result = await bridge.process_query(
+        prompts.REPROMPTING_SYSTEM_PROMPT,
+        input_message
+    )
+
+    try:
+
+        reply = result['llm_response'].content[0].text
+
+    except (IndexError, AttributeError):
+        reply = 'No final reply from model'
+
+    dialog.info('LLM final reply: %s ...', reply[:50])
+
+    output_queue.put(reply)
+
+
+async def other_call(
+    prompt: str,
+    bridge: AnthropicBridge,
+    dialog: logging.Logger
+) -> dict:
+
+    '''Re-prompts the LLM after a call to any tool other than get_feed().
+    Returns either the final reply or the next prompt for the tool loop.
+
+    Args:
+        prompt: prompt to send to the LLM
+        bridge: AnthropicBridge class instance
+        dialog: logger instance to record intermediate responses and internal dialog
+    '''
+
+    input_message = [{
+        'role': 'user',
+        'content': prompt
+    }]
+
+    result = await bridge.process_query(
+        prompts.REPROMPTING_SYSTEM_PROMPT,
+        input_message
+    )
+
+    if result['tool_result']:
+
+        tool_call = result['tool_call']
+        tool_name = tool_call['name']
+        tool_parameters = tool_call['parameters']
+        response_content = result['llm_response'].content[0]
+
+        if isinstance(response_content, text_block.TextBlock):
+            intermediate_reply = response_content.text
+        else:
+            intermediate_reply = INTERMEDIATE_REPLY_HINTS[tool_name]
+
+        dialog.info('LLM intermediate reply: %s', intermediate_reply)
+        dialog.info('MCP: called %s', tool_name)
+
+        tool_result = json.loads(result['tool_result'].content)['text']
+
+        prompt += f'agent: {intermediate_reply}\n'
+        prompt += f'function call: {tool_name}("{tool_parameters}")\n'
+        prompt += f'function return: {tool_result}\n'
+
+        dialog.info('System: re-prompting LLM with return from %s call', tool_name)
+
+        return {'new_prompt': prompt}
+
+    else:
+
+        reply = result['llm_response'].content[0].text
+        return {'final reply': reply}
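
The contract between `tool_loop()` and `other_call()` is a plain dict: a `'final reply'` key terminates the loop, while a `'new_prompt'` key sends the grown prompt around again. A stubbed sketch of that termination logic (the fake call below is purely illustrative):

```python
# Illustrative stand-in for other_call() demonstrating the loop contract:
# return {'new_prompt': ...} to keep looping, {'final reply': ...} to stop.
import asyncio


async def fake_other_call(prompt: str) -> dict:
    '''Pretends the first round needs one more tool call.'''

    if 'function return' in prompt:
        return {'final reply': 'Here is the link to the article.'}

    return {'new_prompt': prompt + '\nfunction return: {"link": "https://example.com"}\n'}


async def run_loop() -> str:
    prompt = 'user: Give me the link to that article.'

    while True:
        reply = await fake_other_call(prompt)

        if 'final reply' in reply:
            return reply['final reply']

        prompt = reply['new_prompt']


print(asyncio.run(run_loop()))  # -> 'Here is the link to the article.'
```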
rss_client.py CHANGED
@@ -23,7 +23,7 @@ Path('logs').mkdir(parents=True, exist_ok=True)
 # Clear old logs if present
 gradio_funcs.delete_old_logs('logs', 'rss_client')
 
-# Configure
+# Configure the root logger
 logging.basicConfig(
     handlers=[RotatingFileHandler(
         'logs/rss_client.log',
@@ -31,7 +31,7 @@ logging.basicConfig(
         backupCount=10,
         mode='w'
     )],
-    level=logging.DEBUG,
+    level=logging.INFO,
     format='%(levelname)s - %(name)s - %(message)s'
 )
 
@@ -40,8 +40,7 @@ logger = logging.getLogger(__name__)
 
 # Handle MCP server connection and interactions
 RSS_CLIENT = MCPClientWrapper(
-    'https://agents-mcp-hackathon-rss-mcp-server.hf.space/gradio_api/mcp/sse',
-    #'http://127.0.0.1:7861/gradio_api/mcp/sse'
+    'https://agents-mcp-hackathon-rss-mcp-server.hf.space/gradio_api/mcp/sse'
 )
 logger.info('Started MCP client')
 
@@ -57,6 +56,7 @@ logger.info('Started Anthropic API bridge')
 OUTPUT_QUEUE = queue.Queue()
 logger.info('Created response queue')
 
+
 def user_message(message: str, history: list) -> Tuple[str, list]:
     '''Adds user message to conversation and returns for immediate posting.
 
@@ -93,29 +93,32 @@ def send_message(chat_history: list):
 
     chat_history.append({'role': 'assistant', 'content': ''})
 
-    for character in response:
-        chat_history[-1]['content'] += character
-        time.sleep(0.005)
+    if response is not None:
+
+        for character in response:
+            chat_history[-1]['content'] += character
+            time.sleep(0.005)
 
-        yield chat_history
+            yield chat_history
 
 
-with gr.Blocks(title='MCP RSS client') as demo:
+with gr.Blocks(title='RASS agent') as demo:
     with gr.Row():
         gr.HTML(html.TITLE)
 
     gr.Markdown(html.DESCRIPTION)
+    gr.Markdown(html.FEATURES_TOOLS)
 
     # MCP connection/tool dump
     connect_btn = gr.Button('Connect to MCP server')
-    status = gr.Textbox(label='MCP server tool dump', interactive=False, lines=4)
+    status = gr.Textbox(label='MCP server tool dump', interactive=False, lines=5, max_lines=5)
     connect_btn.click(# pylint: disable=no-member
         RSS_CLIENT.list_tools,
         outputs=status
     )
 
     # Dialog log output
-    dialog_output = gr.Textbox(label='Internal dialog', lines=10, max_lines=100)
+    dialog_output = gr.Textbox(label='Internal dialog', lines=5, max_lines=5)
     timer = gr.Timer(0.5, active=True)
 
     timer.tick( # pylint: disable=no-member
@@ -132,9 +135,9 @@ with gr.Blocks(title='MCP RSS client') as demo:
     )
 
     msg = gr.Textbox(
-        'Are there any new posts on Hacker News?',
+        'Are there any new posts on Slashdot?',
         label='Ask about content or articles on a site or platform',
-        placeholder='Is there anything new on Hacker News?',
+        placeholder='Is there anything new on Slashdot?',
         scale=4
    )
 
@@ -148,6 +151,7 @@ with gr.Blocks(title='MCP RSS client') as demo:
 if __name__ == '__main__':
 
     current_directory = os.getcwd()
+    logger.info(current_directory)
 
     if 'pyrite' in current_directory:
         logger.info('Starting RASS on LAN')
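
The updated `send_message` guards against `None` responses before streaming the reply character by character; the underlying pattern is an ordinary Python generator that Gradio re-renders on every yield. A standalone sketch:

```python
# Standalone sketch of the character-streaming pattern in send_message:
# yield the growing history after each character for a typing effect.
import time


def stream_reply(chat_history: list, response: str | None):
    chat_history.append({'role': 'assistant', 'content': ''})

    if response is not None:  # guard added in this commit

        for character in response:
            chat_history[-1]['content'] += character
            time.sleep(0.005)

            yield chat_history


history = [{'role': 'user', 'content': 'Is there anything new on Slashdot?'}]

for update in stream_reply(history, 'Yes, three new posts.'):
    pass  # Gradio would re-render the chat window here

print(history[-1]['content'])  # -> 'Yes, three new posts.'
```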