Spaces (Sleeping)
bkb2135 committed · Commit 4f0a02c · Parent(s): 90ceb20

Remove Unnecessary Files
Files changed:
- .gitignore  +0 -7
- api.py  +0 -147
- data/wandb/tzebw6rb.parquet  +0 -3
- forward.py  +0 -249
- requirements.txt  +1 -2
- server.py  +0 -97
- validators/__init__.py  +0 -4
- validators/base.py  +0 -42
- validators/database.py  +0 -85
- validators/sn1_validator_wrapper.py  +0 -66
- validators/stream_manager.py  +0 -73
- validators/streamer.py  +0 -271
- validators/validator_utils.py  +0 -35
.gitignore CHANGED
@@ -155,15 +155,8 @@ dmypy.json
 # Cython debug symbols
 cython_debug/
 
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
 
 testing/
 core
 app.config.js
-wandb
 .vscode
api.py DELETED
@@ -1,147 +0,0 @@
-
-import atexit
-import datetime
-
-from flask import Flask, request, jsonify
-from apscheduler.schedulers.background import BackgroundScheduler
-
-import utils
-
-app = Flask(__name__)
-
-
-# Global variables (saves time on loading data)
-state_vars = None
-reload_timestamp = datetime.datetime.now().strftime('%D %T')
-
-
-def load_data():
-    """
-    Reload the state variables
-    """
-    global state_vars, reload_timestamp
-    state_vars = utils.load_state_vars()
-
-    reload_timestamp = datetime.datetime.now().strftime('%D %T')
-
-    print(f'Reloaded data at {reload_timestamp}')
-
-
-def start_scheduler():
-    scheduler = BackgroundScheduler()
-    scheduler.add_job(func=load_data, trigger="interval", seconds=60*30)
-    scheduler.start()
-
-    # Shut down the scheduler when exiting the app
-    atexit.register(lambda: scheduler.shutdown())
-
-
-@app.route('/', methods=['GET'])
-def home():
-    return "Welcome to the Bittensor Protein Folding Leaderboard API!"
-
-
-@app.route('/updated', methods=['GET'])
-def updated():
-    return reload_timestamp
-
-
-@app.route('/data', methods=['GET'])
-@app.route('/data/<period>', methods=['GET'])
-def data(period=None):
-    """
-    Get the productivity metrics
-    """
-    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
-    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
-    return jsonify(
-        df.astype(str).to_dict(orient='records')
-    )
-
-@app.route('/productivity', methods=['GET'])
-@app.route('/productivity/<period>', methods=['GET'])
-def productivity_metrics(period=None):
-    """
-    Get the productivity metrics
-    """
-
-    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
-    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
-    return jsonify(
-        utils.get_productivity(df)
-    )
-
-
-@app.route('/throughput', methods=['GET'])
-@app.route('/throughput/<period>', methods=['GET'])
-def throughput_metrics(period=None):
-    """
-    Get the throughput metrics
-    """
-    assert period in ('24h', None), f"Invalid period: {period}. Must be '24h' or None."
-    df = state_vars["dataframe_24h"] if period == '24h' else state_vars["dataframe"]
-    return jsonify(utils.get_data_transferred(df))
-
-
-@app.route('/metagraph', methods=['GET'])
-def metagraph():
-    """
-    Get the metagraph data
-    Returns:
-    - metagraph_data: List of dicts (from pandas DataFrame)
-    """
-
-    df_m = state_vars["metagraph"]
-
-    return jsonify(
-        df_m.to_dict(orient='records')
-    )
-
-@app.route('/leaderboard', methods=['GET'])
-@app.route('/leaderboard/<entity>', methods=['GET'])
-@app.route('/leaderboard/<entity>/<ntop>', methods=['GET'])
-def leaderboard(entity='identity',ntop=10):
-    """
-    Get the leaderboard data
-    Returns:
-    - leaderboard_data: List of dicts (from pandas DataFrame)
-    """
-
-    assert entity in utils.ENTITY_CHOICES, f"Invalid entity choice: {entity}"
-
-    df_miners = utils.get_leaderboard(
-        state_vars["metagraph"],
-        ntop=int(ntop),
-        entity_choice=entity
-    )
-
-    return jsonify(
-        df_miners.to_dict(orient='records')
-    )
-
-@app.route('/validator', methods=['GET'])
-def validator():
-    """
-    Get the validator data
-    Returns:
-    - validator_data: List of dicts (from pandas DataFrame)
-    """
-    df_m = state_vars["metagraph"]
-    df_validators = df_m.loc[df_m.validator_trust > 0]
-
-    return jsonify(
-        df_validators.to_dict(orient='records')
-    )
-
-
-if __name__ == '__main__':
-
-    load_data()
-    start_scheduler()
-
-    app.run(host='0.0.0.0', port=5001, debug=True)
-
-
-# to test locally
-# curl -X GET http://0.0.0.0:5001/data
-
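The deleted api.py served read-only JSON over a handful of GET routes on port 5001. As a quick reference, a minimal smoke-test sketch against those routes (hypothetical, assuming the app above is running locally and the `requests` package is installed):

import requests

BASE = "http://0.0.0.0:5001"

# Every route below is defined in the deleted api.py shown above.
for route in ("/data", "/data/24h", "/productivity", "/throughput",
              "/metagraph", "/leaderboard/identity/10", "/validator"):
    resp = requests.get(f"{BASE}{route}", timeout=30)
    resp.raise_for_status()
    print(route, "->", resp.status_code)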
data/wandb/tzebw6rb.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:97537f22180da6992b01c2ba7566fabc7c265d27c2c317eda2a191d52e08b843
-size 13624028
forward.py DELETED
@@ -1,249 +0,0 @@
-import time
-import sys
-import asyncio
-import numpy as np
-import bittensor as bt
-import traceback
-from typing import List, Dict, Awaitable
-from prompting.agent import HumanAgent
-from prompting.dendrite import DendriteResponseEvent
-from prompting.conversation import create_task
-from prompting.protocol import StreamPromptingSynapse
-from prompting.rewards import RewardResult
-from prompting.utils.uids import get_random_uids
-from prompting.utils.logging import log_event
-from prompting.utils.misc import async_log, serialize_exception_to_string
-from dataclasses import dataclass
-
-
-@async_log
-async def generate_reference(agent):
-    loop = asyncio.get_running_loop()
-    result = await loop.run_in_executor(
-        None, agent.task.generate_reference, agent.llm_pipeline
-    )
-    return result
-
-
-@async_log
-async def execute_dendrite_call(dendrite_call):
-    responses = await dendrite_call
-    return responses
-
-
-@dataclass
-class StreamResult:
-    synapse: StreamPromptingSynapse = None
-    exception: BaseException = None
-    uid: int = None
-
-
-async def process_response(uid: int, async_generator: Awaitable):
-    """Process a single response asynchronously."""
-    try:
-        chunk = None  # Initialize chunk with a default value
-        async for chunk in async_generator:  # most important loop, as this is where we acquire the final synapse.
-            bt.logging.debug(f"\nchunk for uid {uid}: {chunk}")
-
-        if chunk is not None:
-            synapse = chunk  # last object yielded is the synapse itself with completion filled
-
-            # Assuming chunk holds the last value yielded which should be a synapse
-            if isinstance(synapse, StreamPromptingSynapse):
-                return synapse
-
-        bt.logging.debug(
-            f"Synapse is not StreamPromptingSynapse. Miner uid {uid} completion set to '' "
-        )
-    except Exception as e:
-        # bt.logging.error(f"Error in generating reference or handling responses: {e}", exc_info=True)
-        traceback_details = traceback.format_exc()
-        bt.logging.error(
-            f"Error in generating reference or handling responses for uid {uid}: {e}\n{traceback_details}"
-        )
-
-        failed_synapse = StreamPromptingSynapse(
-            roles=["user"], messages=["failure"], completion=""
-        )
-
-        return failed_synapse
-
-
-@async_log
-async def handle_response(responses: Dict[int, Awaitable]) -> List[StreamResult]:
-    """The handle_response function is responsible for creating asyncio tasks around acquiring streamed miner chunks
-    and processing them asynchronously. It then pairs the results with their original UIDs and returns a list of StreamResults.
-
-    Args:
-        responses (Dict[int, Awaitable]): Responses contains awaitables that are used to acquire streamed miner chunks.
-
-    Raises:
-        ValueError
-
-    Returns:
-        List[StreamResult]: DataClass containing the synapse, exception, and uid
-    """
-    tasks_with_uid = [
-        (uid, responses[uid]) for uid, _ in responses.items()
-    ]  # Pair UIDs with their tasks
-
-    # Start tasks, preserving order and their associated UIDs
-    tasks = [process_response(uid, resp) for uid, resp in tasks_with_uid]
-
-    results = await asyncio.gather(*tasks, return_exceptions=True)
-
-    mapped_results = []
-    # Pair each result with its original uid
-    for (uid, _), result in zip(tasks_with_uid, results):
-        # If the result is a StreamPromptingSynapse, the response was successful and the stream result is added without exceptions
-        if isinstance(result, StreamPromptingSynapse):
-            mapped_results.append(StreamResult(synapse=result, uid=uid))
-
-        # If the result is an exception, the response was unsuccessful and the stream result is added with the exception and an empty synapse
-        elif isinstance(result, BaseException):
-            failed_synapse = StreamPromptingSynapse(
-                roles=["user"], messages=["failure"], completion=""
-            )
-            mapped_results.append(
-                StreamResult(synapse=failed_synapse, exception=result, uid=uid)
-            )
-
-        # If the result is neither an error or a StreamSynapse, log the error and raise a ValueError
-        else:
-            bt.logging.error(f"Unexpected result type for UID {uid}: {result}")
-            raise ValueError(f"Unexpected result type for UID {uid}: {result}")
-
-    return mapped_results
-
-
-@async_log
-async def generate_reference(agent: HumanAgent):
-    loop = asyncio.get_running_loop()
-    result = await loop.run_in_executor(
-        None, agent.task.generate_reference, agent.llm_pipeline
-    )
-    return result
-
-
-def log_stream_results(stream_results: List[StreamResult]):
-    failed_responses = [
-        response for response in stream_results if response.exception is not None
-    ]
-    empty_responses = [
-        response
-        for response in stream_results
-        if response.exception is None and response.synapse.completion == ""
-    ]
-    non_empty_responses = [
-        response
-        for response in stream_results
-        if response.exception is None and response.synapse.completion != ""
-    ]
-
-    bt.logging.info(f"Total of non_empty responses: ({len(non_empty_responses)})")
-    bt.logging.info(f"Total of empty responses: ({len(empty_responses)})")
-    bt.logging.info(
-        f"Total of failed responses: ({len(failed_responses)}):\n {failed_responses}"
-    )
-
-    for failed_response in failed_responses:
-        formatted_exception = serialize_exception_to_string(failed_response.exception)
-        bt.logging.error(
-            f"Failed response for uid {failed_response.uid}: {formatted_exception}"
-        )
-
-
-async def run_step(
-    self, agent: HumanAgent, k: int, timeout: float, exclude: list = None
-):
-    """Executes a single step of the agent, which consists of:
-    - Getting a list of uids to query
-    - Querying the network
-    - Rewarding the network
-    - Updating the scores
-    - Logging the event
-
-    Args:
-        agent (HumanAgent): The agent to run the step for.
-        k (int): The number of uids to query.
-        timeout (float): The timeout for the queries.
-        exclude (list, optional): The list of uids to exclude from the query. Defaults to [].
-    """
-
-    bt.logging.debug("run_step", agent.task.name)
-
-    # Record event start time.
-    start_time = time.time()
-    # Get the list of uids to query for this step.
-    uids = get_random_uids(self, k=k, exclude=exclude or []).to(self.device)
-    uids_cpu = uids.cpu().tolist()
-
-    axons = [self.metagraph.axons[uid] for uid in uids]
-
-    # Directly call dendrite and process responses in parallel
-    streams_responses = await self.dendrite(
-        axons=axons,
-        synapse=StreamPromptingSynapse(roles=["user"], messages=[agent.challenge]),
-        timeout=timeout,
-        deserialize=False,
-        streaming=True,
-    )
-
-    # Prepare the task for handling stream responses
-    handle_stream_responses_task = asyncio.create_task(
-        handle_response(responses=dict(zip(uids_cpu, streams_responses)))
-    )
-
-    if not agent.task.static_reference:
-        reference_generation_task = generate_reference(agent)
-        _, stream_results = await asyncio.gather(
-            reference_generation_task, handle_stream_responses_task
-        )
-    else:
-        stream_results = await handle_stream_responses_task
-
-    log_stream_results(stream_results)
-
-    all_synapses_results = [stream_result.synapse for stream_result in stream_results]
-
-    # Encapsulate the responses in a response event (dataclass)
-    response_event = DendriteResponseEvent(
-        responses=all_synapses_results, uids=uids, timeout=timeout
-    )
-
-    bt.logging.info(f"Created DendriteResponseEvent:\n {response_event}")
-    # Reward the responses and get the reward result (dataclass)
-    # This contains a list of RewardEvents but can be exported as a dict (column-wise) for logging etc
-    reward_result = RewardResult(
-        self.reward_pipeline,
-        agent=agent,
-        response_event=response_event,
-        device=self.device,
-    )
-    bt.logging.info(f"Created RewardResult:\n {reward_result}")
-
-    # The original idea was that the agent is 'satisfied' when it gets a good enough response (e.g. reward critera is met, such as ROUGE>threshold)
-    agent.update_progress(
-        top_reward=reward_result.rewards.max(),
-        top_response=response_event.completions[reward_result.rewards.argmax()],
-    )
-
-    self.update_scores(reward_result.rewards, uids)
-
-    stream_results_uids = [stream_result.uid for stream_result in stream_results]
-    stream_results_exceptions = [
-        serialize_exception_to_string(stream_result.exception)
-        for stream_result in stream_results
-    ]
-    # Log the step event.
-    event = {
-        "block": self.block,
-        "step_time": time.time() - start_time,
-        "stream_results_uids": stream_results_uids,
-        "stream_results_exceptions": stream_results_exceptions,
-        **agent.__state_dict__(full=self.config.neuron.log_full),
-        **reward_result.__state_dict__(full=self.config.neuron.log_full),
-        **response_event.__state_dict__(),
-    }
-
-    return event
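A core detail of the deleted handle_response() is that asyncio.gather(..., return_exceptions=True) preserves input order, which is what lets results be zipped back to their originating UIDs even when some streams raise. A self-contained sketch of that pattern (all names here are illustrative, not from the repo):

import asyncio

async def fake_miner(uid: int) -> str:
    # Stands in for process_response(); odd uids simulate failures.
    if uid % 2:
        raise RuntimeError(f"miner {uid} failed")
    return f"completion from miner {uid}"

async def main():
    uids = [0, 1, 2, 3]
    # return_exceptions=True keeps results positionally aligned with uids.
    results = await asyncio.gather(*(fake_miner(u) for u in uids),
                                   return_exceptions=True)
    for uid, result in zip(uids, results):
        status = "error" if isinstance(result, BaseException) else "ok"
        print(uid, status, result)

asyncio.run(main())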
requirements.txt CHANGED
@@ -6,5 +6,4 @@ streamlit
 plotly
 wandb
 numpy
-pandas
-bittensor
+pandas
server.py DELETED
@@ -1,97 +0,0 @@
-import asyncio
-
-import bittensor as bt
-from aiohttp import web
-from aiohttp_apispec import (
-    docs,
-    request_schema,
-    response_schema,
-    setup_aiohttp_apispec,
-    validation_middleware,
-)
-
-from common import utils
-from common.middlewares import api_key_middleware, json_parsing_middleware
-from common.schemas import QueryChatSchema, StreamChunkSchema, StreamErrorSchema
-from validators import QueryValidatorParams, S1ValidatorAPI, ValidatorAPI
-
-
-@docs(tags=["Prompting API"], summary="Chat", description="Chat endpoint.")
-@request_schema(QueryChatSchema)
-@response_schema(StreamChunkSchema, 200)
-@response_schema(StreamErrorSchema, 400)
-async def chat(request: web.Request) -> web.StreamResponse:
-    """Chat endpoint for the validator"""
-    params = QueryValidatorParams.from_request(request)
-
-    # Access the validator from the application context
-    validator: ValidatorAPI = request.app["validator"]
-
-    response = await validator.query_validator(params)
-    return response
-
-
-@docs(
-    tags=["Prompting API"],
-    summary="Echo test",
-    description="Echo endpoint for testing purposes.",
-)
-@request_schema(QueryChatSchema)
-@response_schema(StreamChunkSchema, 200)
-@response_schema(StreamErrorSchema, 400)
-async def echo_stream(request: web.Request) -> web.StreamResponse:
-    return await utils.echo_stream(request)
-
-
-class ValidatorApplication(web.Application):
-    def __init__(self, validator_instance=None, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        self["validator"] = (
-            validator_instance if validator_instance else S1ValidatorAPI()
-        )
-
-        # Add middlewares to application
-        self.add_routes(
-            [
-                web.post("/chat/", chat),
-                web.post("/echo/", echo_stream),
-            ]
-        )
-        self.setup_openapi()
-        self.setup_middlewares()
-        # TODO: Enable rewarding and other features
-
-    def setup_middlewares(self):
-        self.middlewares.append(validation_middleware)
-        self.middlewares.append(json_parsing_middleware)
-        self.middlewares.append(api_key_middleware)
-
-    def setup_openapi(self):
-        setup_aiohttp_apispec(
-            app=self,
-            title="Prompting API",
-            url="/docs/swagger.json",
-            swagger_path="/docs",
-        )
-
-
-def main(run_aio_app=True, test=False) -> None:
-    loop = asyncio.get_event_loop()
-    port = 10000
-    if run_aio_app:
-        # Instantiate the application with the actual validator
-        bt.logging.info("Starting validator application.")
-        validator_app = ValidatorApplication()
-        bt.logging.success("Validator app initialized successfully", validator_app)
-
-        try:
-            web.run_app(validator_app, port=port, loop=loop)
-        except KeyboardInterrupt:
-            print("Keyboard interrupt detected. Exiting validator.")
-        finally:
-            pass
-
-
-if __name__ == "__main__":
-    main()
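For reference, the deleted server exposed POST /chat/ on port 10000, and QueryValidatorParams.from_request (in validators/base.py below) shows which request fields it accepted. A hypothetical client sketch; authentication is omitted because the header expected by api_key_middleware is not visible in this diff:

import requests

payload = {
    "roles": ["user"],
    "messages": ["What is the capital of Texas?"],  # example prompt, made up
    "k": 3,                            # maps to k_miners (default 10)
    "timeout": 10,
    "sampling_mode": "top_incentive",  # or "random" (the default)
}

# stream=True lets us consume the chunked JSON response as it arrives.
with requests.post("http://localhost:10000/chat/", json=payload, stream=True) as resp:
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="")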
validators/__init__.py DELETED
@@ -1,4 +0,0 @@
-from .base import QueryValidatorParams, ValidatorAPI, MockValidator
-from .sn1_validator_wrapper import S1ValidatorAPI
-from .streamer import AsyncResponseDataStreamer
-from .stream_manager import StreamManager
validators/base.py DELETED
@@ -1,42 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import List
-from dataclasses import dataclass
-from aiohttp.web import Response, Request, StreamResponse
-
-
-@dataclass
-class QueryValidatorParams:
-    k_miners: int
-    exclude: List[str]
-    roles: List[str]
-    messages: List[str]
-    timeout: int
-    prefer: str
-    request: Request
-    sampling_mode: str
-
-    @staticmethod
-    def from_request(request: Request):
-        data = request["data"]
-
-        return QueryValidatorParams(
-            k_miners=data.get("k", 10),
-            exclude=data.get("exclude", []),
-            roles=data["roles"],
-            messages=data["messages"],
-            timeout=data.get("timeout", 10),
-            prefer=data.get("prefer", "longest"),
-            request=request,
-            sampling_mode=data.get("sampling_mode", "random"),
-        )
-
-
-class ValidatorAPI(ABC):
-    @abstractmethod
-    async def query_validator(self, params: QueryValidatorParams) -> StreamResponse:
-        pass
-
-
-class MockValidator(ValidatorAPI):
-    async def query_validator(self, params: QueryValidatorParams) -> StreamResponse:
-        ...
validators/database.py DELETED
@@ -1,85 +0,0 @@
-import os
-import json
-import aiofiles
-import bittensor as bt
-from .streamer import ProcessedStreamResponse
-
-
-class LogDatabase:
-    """
-    A class to manage a log database stored as a JSONL (JSON Lines) file.
-
-    Attributes:
-        log_database_path (str): The path to the log database file.
-
-    Methods:
-        ensure_db_exists(file_path):
-            Ensures that the log database file exists. If it doesn't, an empty file is created.
-
-        add_streams_to_db(stream_responses: ProcessedStreamResponse):
-            Asynchronously adds stream responses to the log database.
-
-        append_dicts_to_file(file_path, dictionaries):
-            Asynchronously appends a list of dictionaries to the specified file.
-    """
-
-    def __init__(self, log_database_path: str):
-        """
-        Initializes the LogDatabase with the given log database file path.
-
-        Args:
-            log_database_path (str): The path to the log database file.
-        """
-        self.log_database_path = log_database_path
-        self.ensure_db_exists(log_database_path)
-
-    def ensure_db_exists(self, file_path):
-        """
-        Ensures that the log database file exists. If it doesn't, creates an empty JSONL file.
-
-        Args:
-            file_path (str): The path to the log database file.
-        """
-        if not os.path.exists(file_path):
-            # Create an empty JSONL file
-            with open(file_path, "w") as file:
-                pass
-            # TODO: change log to debug
-            bt.logging.info(f"File '{file_path}' created.")
-        else:
-            bt.logging.info(f"File '{file_path}' already exists.")
-
-    async def add_streams_to_db(self, stream_responses: ProcessedStreamResponse):
-        """
-        Asynchronously adds stream responses to the log database.
-
-        Args:
-            stream_responses (ProcessedStreamResponse): A list of processed stream responses to add to the log database.
-
-        Raises:
-            Exception: If an error occurs while adding streams to the database.
-        """
-        bt.logging.info(f"Writing streams to the database...")
-        try:
-            stream_responses_dict = [
-                dict(stream_response) for stream_response in stream_responses
-            ]
-            await self.append_dicts_to_file(
-                self.log_database_path, stream_responses_dict
-            )
-            bt.logging.success("Streams added to the database.")
-        except Exception as e:
-            bt.logging.error(f"Error while adding streams to the database: {e}")
-            raise e
-
-    async def append_dicts_to_file(self, file_path, dictionaries):
-        """
-        Asynchronously appends a list of dictionaries to the specified file.
-
-        Args:
-            file_path (str): The path to the file where dictionaries will be appended.
-            dictionaries (list): A list of dictionaries to append to the file.
-        """
-        async with aiofiles.open(file_path, mode="a") as file:
-            for dictionary in dictionaries:
-                await file.write(json.dumps(dictionary) + "\n")
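Since LogDatabase writes one JSON document per line, reading the log back is a line-by-line json.loads. A small companion sketch, assuming the default requests_db.jsonl path used by StreamManager below:

import json

# Each non-empty line in the JSONL log is one serialized stream response.
with open("requests_db.jsonl") as f:
    records = [json.loads(line) for line in f if line.strip()]

print(f"{len(records)} logged stream responses")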
validators/sn1_validator_wrapper.py DELETED
@@ -1,66 +0,0 @@
-import asyncio
-import random
-import bittensor as bt
-from prompting.validator import Validator
-from prompting.utils.uids import get_random_uids
-from prompting.protocol import StreamPromptingSynapse
-from .base import QueryValidatorParams, ValidatorAPI
-from aiohttp.web_response import Response, StreamResponse
-from .streamer import AsyncResponseDataStreamer
-from .validator_utils import get_top_incentive_uids
-from .stream_manager import StreamManager
-
-
-class S1ValidatorAPI(ValidatorAPI):
-    def __init__(self):
-        self.validator = Validator()
-
-    def sample_uids(self, params: QueryValidatorParams):
-        if params.sampling_mode == "random":
-            uids = get_random_uids(
-                self.validator, k=params.k_miners, exclude=params.exclude or []
-            ).tolist()
-            return uids
-        if params.sampling_mode == "top_incentive":
-            metagraph = self.validator.metagraph
-            vpermit_tao_limit = self.validator.config.neuron.vpermit_tao_limit
-
-            top_uids = get_top_incentive_uids(
-                metagraph, k=params.k_miners, vpermit_tao_limit=vpermit_tao_limit
-            )
-
-            return top_uids
-
-    async def get_stream_response(self, params: QueryValidatorParams) -> StreamResponse:
-        # Guess the task name of current request
-        # task_name = utils.guess_task_name(params.messages[-1])
-
-        # Get the list of uids to query for this step.
-        uids = self.sample_uids(params)
-        axons = [self.validator.metagraph.axons[uid] for uid in uids]
-
-        # Make calls to the network with the prompt.
-        bt.logging.info(
-            f"Sampling dendrite by {params.sampling_mode} with roles {params.roles} and messages {params.messages}"
-        )
-
-        streams_responses = await self.validator.dendrite(
-            axons=axons,
-            synapse=StreamPromptingSynapse(
-                roles=params.roles, messages=params.messages
-            ),
-            timeout=params.timeout,
-            deserialize=False,
-            streaming=True,
-        )
-
-        # Creates a streamer from the selected stream
-        stream_manager = StreamManager()
-        selected_stream = await stream_manager.process_streams(
-            params.request, streams_responses, uids
-        )
-
-        return selected_stream
-
-    async def query_validator(self, params: QueryValidatorParams) -> Response:
-        return await self.get_stream_response(params)
validators/stream_manager.py DELETED
@@ -1,73 +0,0 @@
-import asyncio
-import bittensor as bt
-from .streamer import AsyncResponseDataStreamer
-from .database import LogDatabase
-from typing import List, AsyncIterator
-from aiohttp.web import Request
-
-
-class StreamManager:
-    """
-    A class to manage the processing of multiple asynchronous data streams and log their responses.
-
-    Attributes:
-        log_database (LogDatabase): The log database to store stream responses.
-
-    Methods:
-        process_streams(request, streams_responses, stream_uids):
-            Processes multiple asynchronous streams, logs their responses, and returns the selected stream response.
-    """
-
-    def __init__(self, log_database_path: str = "requests_db.jsonl"):
-        """
-        Initializes the StreamManager with the given log database file path.
-
-        Args:
-            log_database_path (str): The path to the log database file, defaults to "requests_db.jsonl".
-        """
-        self.log_database = LogDatabase(log_database_path)
-
-    async def process_streams(
-        self,
-        request: Request,
-        streams_responses: List[AsyncIterator],
-        stream_uids: List[int],
-    ):
-        """
-        Processes multiple asynchronous streams, logs their responses, and returns the selected stream response (stream from first non-empty chunk).
-
-        Args:
-            request (Request): The web request object.
-            streams_responses (List[AsyncIterator]): A list of asynchronous iterators representing the streams.
-            stream_uids (List[int]): A list of unique IDs for the streams.
-
-        Returns:
-            ProcessedStreamResponse: The response from the selected stream.
-        """
-        lock = asyncio.Lock()
-
-        streamers = [
-            AsyncResponseDataStreamer(
-                async_iterator=stream, selected_uid=stream_uid, lock=lock
-            )
-            for stream, stream_uid in zip(streams_responses, stream_uids)
-        ]
-        completed_streams = await asyncio.gather(
-            *[streamer.stream(request) for streamer in streamers]
-        )
-
-        lock.release()
-        bt.logging.info(f"Streams from uids: {stream_uids} processing completed.")
-
-        await self.log_database.add_streams_to_db(completed_streams)
-        # Gets the first stream that acquired the lock, meaning the first stream that was able to return a non-empty chunk
-        selected_stream = next(
-            (
-                completed_stream
-                for streamer, completed_stream in zip(streamers, completed_streams)
-                if streamer.lock_acquired
-            ),
-            None,
-        )
-
-        return selected_stream
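The selection logic above hinges on an asyncio.Lock race: all streamers run concurrently, and only the first one to produce a chunk acquires the lock (see write_to_stream in streamer.py below) and becomes the selected stream. A self-contained toy sketch of that pattern, with made-up names:

import asyncio
import random

async def run_stream(uid: int, lock: asyncio.Lock, winners: list) -> int:
    await asyncio.sleep(random.random())  # stands in for time-to-first-chunk
    if not lock.locked():
        await lock.acquire()  # first stream to get here takes the lock
        winners.append(uid)
    return uid

async def main():
    lock = asyncio.Lock()
    winners: list = []
    await asyncio.gather(*(run_stream(uid, lock, winners) for uid in range(4)))
    lock.release()  # mirrors process_streams() releasing the lock afterwards
    print("selected stream uid:", winners[0])

asyncio.run(main())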
validators/streamer.py DELETED
@@ -1,271 +0,0 @@
-import json
-import time
-import asyncio
-import traceback
-import bittensor as bt
-from pydantic import BaseModel
-from datetime import datetime
-from typing import AsyncIterator, Optional, List, Union
-from aiohttp import web, web_response
-from prompting.protocol import StreamPromptingSynapse
-
-
-class StreamChunk(BaseModel):
-    """
-    A model representing a chunk of streaming data.
-
-    Attributes:
-        delta (str): The change in the stream.
-        finish_reason (Optional[str]): The reason for finishing the stream.
-        accumulated_chunks (List[str]): List of accumulated chunks.
-        accumulated_chunks_timings (List[float]): Timings for the accumulated chunks.
-        timestamp (str): The timestamp of the chunk.
-        sequence_number (int): The sequence number of the chunk.
-        selected_uid (int): The selected user ID.
-    """
-    delta: str
-    finish_reason: Optional[str]
-    accumulated_chunks: List[str]
-    accumulated_chunks_timings: List[float]
-    timestamp: str
-    sequence_number: int
-    selected_uid: int
-
-    def encode(self, encoding: str) -> bytes:
-        """
-        Encodes the StreamChunk instance to a JSON-formatted bytes object.
-
-        Args:
-            encoding (str): The encoding to use.
-
-        Returns:
-            bytes: The encoded JSON data.
-        """
-        data = json.dumps(self.dict(), indent=4)
-        return data.encode(encoding)
-
-
-class StreamError(BaseModel):
-    """
-    A model representing an error in the streaming data.
-
-    Attributes:
-        error (str): The error message.
-        timestamp (str): The timestamp of the error.
-        sequence_number (int): The sequence number at the time of error.
-        finish_reason (str): The reason for finishing the stream, defaults to "error".
-    """
-    error: str
-    timestamp: str
-    sequence_number: int
-    finish_reason: str = "error"
-
-    def encode(self, encoding: str) -> bytes:
-        data = json.dumps(self.dict(), indent=4)
-        return data.encode(encoding)
-
-
-ProcessedStreamResponse = Union[StreamChunk, StreamError]
-
-
-class AsyncResponseDataStreamer:
-    """
-    A class to manage asynchronous streaming of response data.
-
-    Attributes:
-        async_iterator (AsyncIterator): An asynchronous iterator for streaming data.
-        selected_uid (int): The selected user ID.
-        lock (asyncio.Lock): An asyncio lock to ensure exclusive access.
-        delay (float): Delay between processing chunks, defaults to 0.1 seconds.
-        accumulated_chunks (List[str]): List of accumulated chunks.
-        accumulated_chunks_timings (List[float]): Timings for the accumulated chunks.
-        finish_reason (str): The reason for finishing the stream.
-        sequence_number (int): The sequence number of the stream.
-        lock_acquired (bool): Flag indicating if the lock was acquired.
-    """
-    def __init__(
-        self,
-        async_iterator: AsyncIterator,
-        selected_uid: int,
-        lock: asyncio.Lock,
-        delay: float = 0.1,
-    ):
-        self.async_iterator = async_iterator
-        self.delay = delay
-        self.selected_uid = selected_uid
-        self.accumulated_chunks: List[str] = []
-        self.accumulated_chunks_timings: List[float] = []
-        self.finish_reason: str = None
-        self.sequence_number: int = 0
-        self.lock = lock
-        self.lock_acquired = False
-
-    def ensure_response_is_created(
-        self, initiated_response: web.StreamResponse
-    ) -> web.StreamResponse:
-        """
-        Ensures that a StreamResponse is created if it does not already exist.
-
-        Args:
-            initiated_response (web.StreamResponse): The initiated response.
-
-        Returns:
-            web.StreamResponse: The ensured response.
-        """
-        # Creates response if it was not created
-        if initiated_response == None:
-            initiated_response = web_response.StreamResponse(status=200, reason="OK")
-            initiated_response.headers["Content-Type"] = "application/json"
-            return initiated_response
-
-        return initiated_response
-
-    async def write_to_stream(
-        self,
-        request: web.Request,
-        initiated_response: web.StreamResponse,
-        stream_chunk: StreamChunk,
-        lock: asyncio.Lock,
-    ) -> web.StreamResponse:
-        """
-        Writes a stream chunk to the response if the lock is acquired.
-
-        Args:
-            request (web.Request): The web request object.
-            initiated_response (web.StreamResponse): The initiated response.
-            stream_chunk (StreamChunk): The chunk of stream data to write.
-            lock (asyncio.Lock): The lock to ensure exclusive access.
-
-        Returns:
-            web.StreamResponse: The response with the written chunk.
-        """
-        # Try to acquire the lock and sets the lock_acquired flag. Only the stream that acquires the lock should write to the response
-        if lock.locked() == False:
-            self.lock_acquired = await lock.acquire()
-
-        if initiated_response == None and self.lock_acquired:
-            initiated_response = self.ensure_response_is_created(initiated_response)
-            # Prepare and send the headers
-            await initiated_response.prepare(request)
-
-        if self.lock_acquired:
-            await initiated_response.write(stream_chunk.encode("utf-8"))
-        else:
-            bt.logging.debug(
-                f"Stream of uid {stream_chunk.selected_uid} was not the first to return, skipping..."
-            )
-
-        return initiated_response
-
-    async def stream(self, request: web.Request) -> ProcessedStreamResponse:
-        """
-        Streams data from the async iterator and writes it to the response.
-
-        Args:
-            request (web.Request): The web request object.
-
-        Returns:
-            ProcessedStreamResponse: The final processed stream response.
-
-        Raises:
-            ValueError: If the stream does not return a valid synapse.
-        """
-        try:
-            start_time = time.time()
-            client_response: web.Response = None
-            final_response: ProcessedStreamResponse
-
-            async for chunk in self.async_iterator:
-                if isinstance(chunk, str):
-                    # If chunk is empty, skip
-                    if not chunk:
-                        continue
-
-                    self.accumulated_chunks.append(chunk)
-                    self.accumulated_chunks_timings.append(time.time() - start_time)
-                    # Gets new response state
-                    self.sequence_number += 1
-                    new_response_state = self._create_chunk_response(
-                        chunk
-                    )
-                    # Writes the new response state to the response
-                    client_response = await self.write_to_stream(
-                        request, client_response, new_response_state, self.lock
-                    )
-
-            if chunk is not None and isinstance(chunk, StreamPromptingSynapse):
-                if len(self.accumulated_chunks) == 0:
-                    self.accumulated_chunks.append(chunk.completion)
-                    self.accumulated_chunks_timings.append(time.time() - start_time)
-
-                self.finish_reason = "completed"
-                self.sequence_number += 1
-                # Assuming the last chunk holds the last value yielded which should be a synapse with the completion filled
-                synapse = chunk
-                final_response = self._create_chunk_response(synapse.completion)
-
-                if synapse.completion:
-                    client_response = await self.write_to_stream(
-                        request, client_response, final_response, self.lock
-                    )
-            else:
-                raise ValueError("Stream did not return a valid synapse.")
-
-        except Exception as e:
-            bt.logging.error(
-                f"Encountered an error while processing stream for uid {self.selected_uid} get_stream_response:\n{traceback.format_exc()}"
-            )
-            error_response = self._create_error_response(str(e))
-            final_response = error_response
-
-            # Only the stream that acquires the lock should write the error response
-            if self.lock_acquired:
-                self.ensure_response_is_created(client_response)
-                client_response.set_status(500, reason="Internal error")
-                client_response.write(error_response.encode("utf-8"))
-        finally:
-            # Only the stream that acquires the lock should close the response
-            if self.lock_acquired:
-                self.ensure_response_is_created(client_response)
-                # Ensure to close the response properly
-                await client_response.write_eof()
-
-            return final_response
-
-    def _create_chunk_response(self, chunk: str) -> StreamChunk:
-        """
-        Creates a StreamChunk object with the current state.
-
-        :param chunk: List of strings representing the current chunk.
-        :return: StreamChunk object.
-        """
-        return StreamChunk(
-            delta=chunk,
-            finish_reason=self.finish_reason,
-            accumulated_chunks=self.accumulated_chunks,
-            accumulated_chunks_timings=self.accumulated_chunks_timings,
-            timestamp=self._current_timestamp(),
-            sequence_number=self.sequence_number,
-            selected_uid=self.selected_uid,
-        )
-
-    def _create_error_response(self, error_message: str) -> StreamError:
-        """
-        Creates a StreamError object.
-
-        :param error_message: Error message to include in the StreamError.
-        :return: StreamError object.
-        """
-        return StreamError(
-            error=error_message,
-            timestamp=self._current_timestamp(),
-            sequence_number=self.sequence_number,
-        )
-
-    def _current_timestamp(self) -> str:
-        """
-        Returns the current timestamp in ISO format.
-
-        :return: Current timestamp as a string.
-        """
-        return datetime.utcnow().isoformat()
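For reference, each chunk written to the client is a pretty-printed JSON document produced by StreamChunk.encode(). A standalone sketch that restates the model from the diff above and prints one such payload (the field values are made up; pydantic v1 style .dict() is used, matching the original):

import json
from typing import List, Optional
from pydantic import BaseModel

class StreamChunk(BaseModel):
    delta: str
    finish_reason: Optional[str]
    accumulated_chunks: List[str]
    accumulated_chunks_timings: List[float]
    timestamp: str
    sequence_number: int
    selected_uid: int

chunk = StreamChunk(
    delta="Austin",
    finish_reason=None,
    accumulated_chunks=["Austin"],
    accumulated_chunks_timings=[0.42],
    timestamp="2024-05-01T12:00:00",
    sequence_number=1,
    selected_uid=7,
)
# This is exactly what StreamChunk.encode("utf-8") serializes, before encoding.
print(json.dumps(chunk.dict(), indent=4))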
validators/validator_utils.py DELETED
@@ -1,35 +0,0 @@
-from typing import List
-from prompting.utils.uids import check_uid_availability
-
-
-def get_top_incentive_uids(metagraph, k: int, vpermit_tao_limit: int) -> List[int]:
-    miners_uids = list(
-        map(
-            int,
-            filter(
-                lambda uid: check_uid_availability(metagraph, uid, vpermit_tao_limit),
-                metagraph.uids,
-            ),
-        )
-    )
-
-    # Builds a dictionary of uids and their corresponding incentives
-    all_miners_incentives = {
-        "miners_uids": miners_uids,
-        "incentives": list(map(lambda uid: metagraph.I[uid], miners_uids)),
-    }
-
-    # Zip the uids and their corresponding incentives into a list of tuples
-    uid_incentive_pairs = list(
-        zip(all_miners_incentives["miners_uids"], all_miners_incentives["incentives"])
-    )
-
-    # Sort the list of tuples by the incentive value in descending order
-    uid_incentive_pairs_sorted = sorted(
-        uid_incentive_pairs, key=lambda x: x[1], reverse=True
-    )
-
-    # Extract the top 10 uids
-    top_k_uids = [uid for uid, incentive in uid_incentive_pairs_sorted[:k]]
-
-    return top_k_uids
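The intermediate dictionary in get_top_incentive_uids is not strictly needed; a behavior-equivalent, more direct reformulation (a sketch, not the repo's code) filters the available uids, sorts them by incentive, and takes the top k:

from typing import List

from prompting.utils.uids import check_uid_availability

def top_incentive_uids(metagraph, k: int, vpermit_tao_limit: int) -> List[int]:
    available = [
        int(uid) for uid in metagraph.uids
        if check_uid_availability(metagraph, uid, vpermit_tao_limit)
    ]
    # metagraph.I holds per-uid incentive values, as used in the original
    return sorted(available, key=lambda uid: metagraph.I[uid], reverse=True)[:k]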