p-ferreira committed
Commit 32f9f6f · unverified · 2 Parent(s): 6d2280a f5c64d0

Merge pull request #4 from macrocosm-os/features/streamer
README.md CHANGED
@@ -1,12 +1,12 @@
 
 <picture>
   <source srcset="./assets/macrocosmos-white.png" media="(prefers-color-scheme: dark)">
-  <img src="macrocosmos-white.png">
+  <img src="assets/macrocosmos-white.png">
 </picture>
 
 <picture>
   <source srcset="./assets/macrocosmos-black.png" media="(prefers-color-scheme: light)">
-  <img src="macrocosmos-black.png">
+  <img src="assets/macrocosmos-black.png">
 </picture>
 
@@ -75,28 +75,36 @@ EXPECTED_ACCESS_KEY=<ACCESS_KEY> pm2 start server.py --interpreter python3 --nam
 ## API Usage
 At present, the API provides two endpoints: `/chat` (live) and `/echo` (test).
 
- `/chat` is used to chat with the network and receive a response. The endpoint requires a JSON payload with the following fields:
- - `k: int`: The number of responses to return
- - `timeout: float`: The time in seconds to wait for a response
- - `roles: List[str]`: The roles of the agents to query
- - `messages: List[str]`: The messages to send to the network
- - `prefer: str`: The preferred response to use as the default view. Should be one of `{'longest', 'shortest'}`
-
- Responses from the `/chat` endpoint are streamed back to the client as they are received from the network. Upon completion, the server will return a JSON response with the following fields:
- - `streamed_chunks: List[str]`: The streamed responses from the network
- - `streamed_chunks_timings: List[float]`: The time taken to receive each streamed response
- - `synapse: StreamPromptingSynapse`: The synapse used to query the network. This contains full context and metadata about the query.
-
- The `StreamPromptingSynapse` object contains the following fields:
- - `uid: int`: The unique identifier of the synapse
- - `completion: str`: The final response from the network
- - `timing: float`: The total time taken to receive the final response
+ `/chat` is used to chat with the network and receive a response. The request payload is a JSON object whose fields map onto the [`QueryValidatorParams`](./validators/base.py) data class:
+ - `k_miners: int`: The number of miners from which to request responses.
+ - `exclude: List[str]`: A list of roles or agents to exclude from querying.
+ - `roles: List[str]`: The roles of the agents to query.
+ - `messages: List[str]`: The messages to send to the network.
+ - `timeout: int`: The time in seconds to wait for a response.
+ - `prefer: str`: The preferred response format; either `'longest'` or `'shortest'`.
+ - `request: Request`: The original request object encapsulating all request data.
+ - `sampling_mode: str`: The sampling mode to use; either `"random"` (the default) or `"top_incentive"`.
+
+ Responses from the `/chat` endpoint are represented by two classes, `StreamChunk` and `StreamError`, with the following attributes:
+ - `StreamChunk`:
+     - `delta: str`: The new chunk of response received.
+     - `finish_reason: Optional[str]`: The reason the response finished, if applicable. Can be `None`, `completed`, or `error` (see `StreamError` below).
+     - `accumulated_chunks: List[str]`: All response chunks accumulated so far.
+     - `accumulated_chunks_timings: List[float]`: The timing of each chunk received.
+     - `timestamp: str`: The timestamp at which the chunk was processed.
+     - `sequence_number: int`: A sequential identifier for the response part.
+     - `selected_uid: int`: The identifier of the selected response source.
+
+ - `StreamError`:
+     - `error: str`: A description of the error that occurred.
+     - `timestamp: str`: The timestamp of the error.
+     - `sequence_number: int`: A sequential identifier for the error.
+     - `finish_reason: str`: Always set to `'error'` to indicate an error completion.
 
 > Note: The API is subject to change as the project evolves.
 
-
 ## Testing
-
 To test the API locally, you can use the following curl command:
 
 ```bash
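
For orientation, here is a minimal client sketch for the `/chat` payload documented above. It is a sketch, not part of the commit: the host and port (`localhost:8000`), the use of the `requests` package, and the absence of an auth header are all assumptions, and the `request` field of `QueryValidatorParams` is filled in server-side by `from_request`, so it is not sent in the JSON body.

```python
import requests  # assumption: any HTTP client works; requests is used for brevity

# Hypothetical payload mirroring the documented QueryValidatorParams fields
payload = {
    "k_miners": 3,
    "exclude": [],
    "roles": ["user"],
    "messages": ["What is the James Webb Space Telescope?"],
    "timeout": 10,
    "prefer": "longest",
    "sampling_mode": "top_incentive",
}

# The endpoint streams StreamChunk/StreamError objects as concatenated JSON
with requests.post("http://localhost:8000/chat", json=payload, stream=True) as resp:
    for raw in resp.iter_content(chunk_size=None):
        print(raw.decode("utf-8"), end="")
```
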
responses.py DELETED
@@ -1,27 +0,0 @@
- from pydantic import BaseModel, Field
- from typing import List, Dict, Any
-
-
- class TextStreamResponse(BaseModel):
-     streamed_chunks: List[str] = Field(
-         default_factory=list, description="List of streamed chunks."
-     )
-     streamed_chunks_timings: List[float] = Field(
-         default_factory=list, description="List of streamed chunks timings, in seconds."
-     )
-     uid: int = Field(0, description="UID of queried miner")
-     completion: str = Field(
-         "", description="The final completed string from the stream."
-     )
-     timing: float = Field(
-         0, description="Timing information of all request, in seconds."
-     )
-
-     def to_dict(self):
-         return {
-             "streamed_chunks": self.streamed_chunks,
-             "streamed_chunks_timings": self.streamed_chunks_timings,
-             "uid": self.uid,
-             "completion": self.completion,
-             "timing": self.timing,
-         }
utils.py CHANGED
@@ -1,12 +1,10 @@
 import re
- import time
- import json
 import asyncio
 import bittensor as bt
 from aiohttp import web
- from responses import TextStreamResponse
 from collections import Counter
 from prompting.rewards import DateRewardModel, FloatDiffModel
+ from validators.streamer import AsyncResponseDataStreamer
 
 UNSUCCESSFUL_RESPONSE_PATTERNS = [
     "I'm sorry",
@@ -136,46 +134,27 @@ def guess_task_name(challenge: str):
     return "qa"
 
 
+ # Simulate the stream synapse for the echo endpoint
+ class EchoAsyncIterator:
+     def __init__(self, message: str, k: int, delay: float):
+         self.message = message
+         self.k = k
+         self.delay = delay
+
+     async def __aiter__(self):
+         for _ in range(self.k):
+             for word in self.message.split():
+                 yield [word]
+                 await asyncio.sleep(self.delay)
+
+
 async def echo_stream(request: web.Request) -> web.StreamResponse:
     request_data = request["data"]
     k = request_data.get("k", 1)
     message = "\n\n".join(request_data["messages"])
 
-     # Create a StreamResponse
-     response = web.StreamResponse(
-         status=200, reason="OK", headers={"Content-Type": "application/json"}
-     )
-     await response.prepare(request)
-
-     completion = ""
-     chunks = []
-     chunks_timings = []
-     start_time = time.time()
-     # Echo the message k times with a timeout between each chunk
-     for _ in range(k):
-         for word in message.split():
-             chunk = f"{word} "
-             await response.write(chunk.encode("utf-8"))
-             completion += chunk
-             await asyncio.sleep(0.3)
-             bt.logging.info(f"Echoed: {chunk}")
-
-         chunks.append(chunk)
-         chunks_timings.append(time.time() - start_time)
-
-     completion = completion.strip()
-
-     # Prepare final JSON chunk
-     response_data = TextStreamResponse(
-         streamed_chunks=chunks,
-         streamed_chunks_timings=chunks_timings,
-         completion=completion,
-         timing=time.time() - start_time,
-     ).to_dict()
-
-     # Send the final JSON as part of the stream
-     await response.write(json.dumps(response_data).encode("utf-8"))
-
-     # Finalize the response
-     await response.write_eof()
-     return response
+
+     echo_iterator = EchoAsyncIterator(message, k, delay=0.3)
+     streamer = AsyncResponseDataStreamer(echo_iterator, selected_uid=0, delay=0.3)
+
+     return await streamer.stream(request)
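
The new `EchoAsyncIterator` yields single-word lists to mimic the string-array chunks that miners stream back, which is what lets the same `AsyncResponseDataStreamer` serve both `/echo` and `/chat`. A minimal standalone sketch of that behavior (standard library only; no aiohttp or bittensor required):

```python
import asyncio

class EchoAsyncIterator:
    """Same shape as the class in the diff above: yields word lists like a miner stream."""
    def __init__(self, message: str, k: int, delay: float):
        self.message = message
        self.k = k
        self.delay = delay

    async def __aiter__(self):
        for _ in range(self.k):
            for word in self.message.split():
                yield [word]  # string arrays, matching the miner chunk format
                await asyncio.sleep(self.delay)

async def main():
    async for chunk in EchoAsyncIterator("hello world", k=2, delay=0.0):
        print(chunk)  # ['hello'], ['world'], ['hello'], ['world']

asyncio.run(main())
```
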
validators/__init__.py CHANGED
@@ -1,2 +1,3 @@
 from .base import QueryValidatorParams, ValidatorAPI, MockValidator
 from .sn1_validator_wrapper import S1ValidatorAPI
+ from .streamer import AsyncResponseDataStreamer
validators/base.py CHANGED
@@ -13,6 +13,7 @@ class QueryValidatorParams:
     timeout: int
     prefer: str
     request: Request
+     sampling_mode: str
 
     @staticmethod
     def from_request(request: Request):
@@ -26,6 +27,7 @@ class QueryValidatorParams:
             timeout=data.get("timeout", 10),
             prefer=data.get("prefer", "longest"),
             request=request,
+             sampling_mode=data.get("sampling_mode", "random"),
         )
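
A quick illustration of the default introduced here (a toy sketch, standard library only): a payload that omits `sampling_mode` falls back to `"random"`, mirroring `from_request` above.

```python
data = {"k_miners": 3, "roles": ["user"], "messages": ["hi"]}
sampling_mode = data.get("sampling_mode", "random")
assert sampling_mode == "random"
```
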
validators/sn1_validator_wrapper.py CHANGED
@@ -1,182 +1,58 @@
- import json
- import utils
- import torch
- import traceback
- import time
 import random
 import bittensor as bt
- from typing import Awaitable
 from prompting.validator import Validator
 from prompting.utils.uids import get_random_uids
- from prompting.protocol import PromptingSynapse, StreamPromptingSynapse
- from prompting.dendrite import DendriteResponseEvent
+ from prompting.protocol import StreamPromptingSynapse
 from .base import QueryValidatorParams, ValidatorAPI
 from aiohttp.web_response import Response, StreamResponse
- from deprecated import deprecated
- from dataclasses import dataclass
- from typing import List
- from responses import TextStreamResponse
-
-
- @dataclass
- class ProcessedStreamResponse:
-     streamed_chunks: List[str]
-     streamed_chunks_timings: List[float]
-     synapse: StreamPromptingSynapse
+ from .streamer import AsyncResponseDataStreamer
+ from .validator_utils import get_top_incentive_uids
 
 
 class S1ValidatorAPI(ValidatorAPI):
     def __init__(self):
         self.validator = Validator()
 
-     @deprecated(
-         reason="This function is deprecated. Validators use stream synapse now, use get_stream_response instead."
-     )
-     async def get_response(self, params: QueryValidatorParams) -> Response:
-         try:
-             # Guess the task name of current request
-             task_name = utils.guess_task_name(params.messages[-1])
-
-             # Get the list of uids to query for this step.
+     def sample_uids(self, params: QueryValidatorParams):
+         if params.sampling_mode == "random":
             uids = get_random_uids(
                 self.validator, k=params.k_miners, exclude=params.exclude or []
             ).tolist()
-             axons = [self.validator.metagraph.axons[uid] for uid in uids]
-
-             # Make calls to the network with the prompt.
-             bt.logging.info(f"Calling dendrite")
-             responses = await self.validator.dendrite(
-                 axons=axons,
-                 synapse=PromptingSynapse(roles=params.roles, messages=params.messages),
-                 timeout=params.timeout,
-             )
-
-             bt.logging.info(f"Creating DendriteResponseEvent:\n {responses}")
-             # Encapsulate the responses in a response event (dataclass)
-             response_event = DendriteResponseEvent(
-                 responses, torch.LongTensor(uids), params.timeout
-             )
-
-             # convert dict to json
-             response = response_event.__state_dict__()
-
-             response["completion_is_valid"] = valid = list(
-                 map(utils.completion_is_valid, response["completions"])
-             )
-             valid_completions = [
-                 response["completions"][i] for i, v in enumerate(valid) if v
-             ]
-
-             response["task_name"] = task_name
-             response["ensemble_result"] = utils.ensemble_result(
-                 valid_completions, task_name=task_name, prefer=params.prefer
-             )
-
-             bt.logging.info(f"Response:\n {response}")
-             return Response(
-                 status=200,
-                 reason="I can't believe it's not butter!",
-                 text=json.dumps(response),
-             )
-
-         except Exception:
-             bt.logging.error(
-                 f"Encountered in {self.__class__.__name__}:get_response:\n{traceback.format_exc()}"
-             )
-             return Response(status=500, reason="Internal error")
-
-     async def process_response(
-         self, response: StreamResponse, async_generator: Awaitable
-     ) -> ProcessedStreamResponse:
-         """Process a single response asynchronously."""
-         # Initialize chunk with a default value
-         chunk = None
-         # Initialize chunk array to accumulate streamed chunks
-         chunks = []
-         chunks_timings = []
-
-         start_time = time.time()
-         last_sent_index = 0
-         async for chunk in async_generator:
-             if isinstance(chunk, list):
-                 # Chunks are currently returned in string arrays, so we need to concatenate them
-                 concatenated_chunks = "".join(chunk)
-                 new_data = concatenated_chunks[last_sent_index:]
-
-                 if new_data:
-                     await response.write(new_data.encode("utf-8"))
-                     bt.logging.info(f"Received new chunk from miner: {chunk}")
-                     last_sent_index += len(new_data)
-                     chunks.extend(chunk)
-                     chunks_timings.append(time.time() - start_time)
-
-         if chunk is not None and isinstance(chunk, StreamPromptingSynapse):
-             # Assuming the last chunk holds the last value yielded which should be a synapse with the completion filled
-             return ProcessedStreamResponse(
-                 synapse=chunk,
-                 streamed_chunks=chunks,
-                 streamed_chunks_timings=chunks_timings,
-             )
-         else:
-             raise ValueError("The last chunkis not a StreamPrompting synapse")
+             return uids
+         if params.sampling_mode == "top_incentive":
+             metagraph = self.validator.metagraph
+             vpermit_tao_limit = self.validator.config.neuron.vpermit_tao_limit
+
+             top_uids = get_top_incentive_uids(metagraph, k=params.k_miners, vpermit_tao_limit=vpermit_tao_limit)
+
+             return top_uids
 
     async def get_stream_response(self, params: QueryValidatorParams) -> StreamResponse:
-         response = StreamResponse(status=200, reason="OK")
-         response.headers["Content-Type"] = "application/json"
-
-         await response.prepare(params.request)  # Prepare and send the headers
-
-         try:
-             # Guess the task name of current request
-             # task_name = utils.guess_task_name(params.messages[-1])
-
-             # Get the list of uids to query for this step.
-             uids = get_random_uids(
-                 self.validator, k=params.k_miners, exclude=params.exclude or []
-             ).tolist()
-             axons = [self.validator.metagraph.axons[uid] for uid in uids]
-
-             # Make calls to the network with the prompt.
-             bt.logging.info(f"Calling dendrite")
-             start_time = time.time()
-
-             streams_responses = await self.validator.dendrite(
-                 axons=axons,
-                 synapse=StreamPromptingSynapse(
-                     roles=params.roles, messages=params.messages
-                 ),
-                 timeout=params.timeout,
-                 deserialize=False,
-                 streaming=True,
-             )
-
-             uid_stream_dict = dict(zip(uids, streams_responses))
-
-             random_uid, random_stream = random.choice(list(uid_stream_dict.items()))
-             processed_response = await self.process_response(response, random_stream)
-
-             # Prepare final JSON chunk
-             response_data = json.dumps(
-                 TextStreamResponse(
-                     streamed_chunks=processed_response.streamed_chunks,
-                     streamed_chunks_timings=processed_response.streamed_chunks_timings,
-                     uid=random_uid,
-                     completion=processed_response.synapse.completion,
-                     timing=time.time() - start_time,
-                 ).to_dict()
-             )
-
-             # Send the final JSON as part of the stream
-             await response.write(json.dumps(response_data).encode("utf-8"))
-         except Exception as e:
-             bt.logging.error(
-                 f"Encountered an error in {self.__class__.__name__}:get_stream_response:\n{traceback.format_exc()}"
-             )
-             response.set_status(500, reason="Internal error")
-             await response.write(json.dumps({"error": str(e)}).encode("utf-8"))
-         finally:
-             await response.write_eof()  # Ensure to close the response properly
-
+         # Guess the task name of current request
+         # task_name = utils.guess_task_name(params.messages[-1])
+
+         # Get the list of uids to query for this step.
+         uids = self.sample_uids(params)
+         axons = [self.validator.metagraph.axons[uid] for uid in uids]
+
+         # Make calls to the network with the prompt.
+         bt.logging.info(f"Calling dendrite")
+
+         streams_responses = await self.validator.dendrite(
+             axons=axons,
+             synapse=StreamPromptingSynapse(
+                 roles=params.roles, messages=params.messages
+             ),
+             timeout=params.timeout,
+             deserialize=False,
+             streaming=True,
+         )
+         uid_stream_dict = dict(zip(uids, streams_responses))
+         random_uid, random_stream = random.choice(list(uid_stream_dict.items()))
+
+         # Creates a streamer from the selected stream
+         streamer = AsyncResponseDataStreamer(async_iterator=random_stream, selected_uid=random_uid)
+         response = await streamer.stream(params.request)
         return response
 
     async def query_validator(self, params: QueryValidatorParams) -> Response:
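
One behavior to note in the new `sample_uids`: if `sampling_mode` is neither `"random"` nor `"top_incentive"`, the method falls through and implicitly returns `None`. A defensive variant of the same dispatch pattern, as a standalone sketch with stand-in bodies (the real implementations are `get_random_uids` and `get_top_incentive_uids`):

```python
def sample_uids(sampling_mode: str, k: int) -> list:
    if sampling_mode == "random":
        return list(range(k))  # stand-in for get_random_uids(...)
    if sampling_mode == "top_incentive":
        return list(range(k))  # stand-in for get_top_incentive_uids(...)
    # Fail loudly instead of silently returning None
    raise ValueError(f"Unknown sampling_mode: {sampling_mode!r}")
```
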
validators/streamer.py ADDED
@@ -0,0 +1,121 @@
+ import json
+ import time
+ import traceback
+ import bittensor as bt
+ from pydantic import BaseModel
+ from datetime import datetime
+ from typing import AsyncIterator, Optional, List
+ from aiohttp import web, web_response
+ from prompting.protocol import StreamPromptingSynapse
+
+
+ class StreamChunk(BaseModel):
+     delta: str
+     finish_reason: Optional[str]
+     accumulated_chunks: List[str]
+     accumulated_chunks_timings: List[float]
+     timestamp: str
+     sequence_number: int
+     selected_uid: int
+
+     def encode(self, encoding: str) -> bytes:
+         data = json.dumps(self.dict(), indent=4)
+         return data.encode(encoding)
+
+
+ class StreamError(BaseModel):
+     error: str
+     timestamp: str
+     sequence_number: int
+     finish_reason: str = 'error'
+
+     def encode(self, encoding: str) -> bytes:
+         data = json.dumps(self.dict(), indent=4)
+         return data.encode(encoding)
+
+
+ class AsyncResponseDataStreamer:
+     def __init__(self, async_iterator: AsyncIterator, selected_uid: int, delay: float = 0.1):
+         self.async_iterator = async_iterator
+         self.delay = delay
+         self.selected_uid = selected_uid
+         self.accumulated_chunks: List[str] = []
+         self.accumulated_chunks_timings: List[float] = []
+         self.finish_reason: str = None
+         self.sequence_number: int = 0
+
+     async def stream(self, request: web.Request) -> web_response.StreamResponse:
+         response = web_response.StreamResponse(status=200, reason="OK")
+         response.headers["Content-Type"] = "application/json"
+         await response.prepare(request)  # Prepare and send the headers
+
+         try:
+             start_time = time.time()
+             async for chunk in self.async_iterator:
+                 if isinstance(chunk, list):
+                     # Chunks are currently returned in string arrays, so we need to concatenate them
+                     concatenated_chunks = "".join(chunk)
+                     self.accumulated_chunks.append(concatenated_chunks)
+                     self.accumulated_chunks_timings.append(time.time() - start_time)
+                     # Gets new response state
+                     self.sequence_number += 1
+                     new_response_state = self._create_chunk_response(concatenated_chunks)
+                     # Writes the new response state to the response
+                     await response.write(new_response_state.encode('utf-8'))
+
+             if chunk is not None and isinstance(chunk, StreamPromptingSynapse):
+                 self.finish_reason = "completed"
+                 self.sequence_number += 1
+                 # Assuming the last chunk holds the last value yielded, which should be a synapse with the completion filled
+                 synapse = chunk
+                 final_state = self._create_chunk_response(synapse.completion)
+                 await response.write(final_state.encode('utf-8'))
+
+         except Exception as e:
+             bt.logging.error(
+                 f"Encountered an error in {self.__class__.__name__}:get_stream_response:\n{traceback.format_exc()}"
+             )
+             response.set_status(500, reason="Internal error")
+             error_response = self._create_error_response(str(e))
+             await response.write(error_response.encode('utf-8'))
+         finally:
+             await response.write_eof()  # Ensure to close the response properly
+         return response
+
+     def _create_chunk_response(self, chunk: str) -> StreamChunk:
+         """
+         Creates a StreamChunk object with the current state.
+
+         :param chunk: String representing the current chunk.
+         :return: StreamChunk object.
+         """
+         return StreamChunk(
+             delta=chunk,
+             finish_reason=self.finish_reason,
+             accumulated_chunks=self.accumulated_chunks,
+             accumulated_chunks_timings=self.accumulated_chunks_timings,
+             timestamp=self._current_timestamp(),
+             sequence_number=self.sequence_number,
+             selected_uid=self.selected_uid
+         )
+
+     def _create_error_response(self, error_message: str) -> StreamError:
+         """
+         Creates a StreamError object.
+
+         :param error_message: Error message to include in the StreamError.
+         :return: StreamError object.
+         """
+         return StreamError(
+             error=error_message,
+             timestamp=self._current_timestamp(),
+             sequence_number=self.sequence_number
+         )
+
+     def _current_timestamp(self) -> str:
+         """
+         Returns the current timestamp in ISO format.
+
+         :return: Current timestamp as a string.
+         """
+         return datetime.utcnow().isoformat()
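
Because `StreamChunk.encode` and `StreamError.encode` use `json.dumps(..., indent=4)` with no delimiter between writes, a client receives the stream as back-to-back pretty-printed JSON documents. A hypothetical consumer sketch for splitting such a buffered body (standard library only):

```python
import json

def parse_stream(body: str) -> list:
    """Split concatenated JSON documents into a list of dicts."""
    decoder = json.JSONDecoder()
    objs, idx = [], 0
    while idx < len(body):
        # Skip any whitespace between documents
        while idx < len(body) and body[idx].isspace():
            idx += 1
        if idx >= len(body):
            break
        obj, end = decoder.raw_decode(body, idx)
        objs.append(obj)
        idx = end
    return objs
```
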
validators/validator_utils.py ADDED
@@ -0,0 +1,23 @@
+ from typing import List
+ from prompting.utils.uids import check_uid_availability
+
+
+ def get_top_incentive_uids(metagraph, k: int, vpermit_tao_limit: int) -> List[int]:
+     miners_uids = list(map(int, filter(lambda uid: check_uid_availability(metagraph, uid, vpermit_tao_limit), metagraph.uids)))
+
+     # Builds a dictionary of uids and their corresponding incentives
+     all_miners_incentives = {
+         "miners_uids": miners_uids,
+         "incentives": list(map(lambda uid: metagraph.I[uid], miners_uids))
+     }
+
+     # Zip the uids and their corresponding incentives into a list of tuples
+     uid_incentive_pairs = list(zip(all_miners_incentives['miners_uids'], all_miners_incentives['incentives']))
+
+     # Sort the list of tuples by the incentive value in descending order
+     uid_incentive_pairs_sorted = sorted(uid_incentive_pairs, key=lambda x: x[1], reverse=True)
+
+     # Extract the top k uids
+     top_k_uids = [uid for uid, incentive in uid_incentive_pairs_sorted[:k]]
+
+     return top_k_uids
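
A toy check of the sorting logic above, with a stand-in metagraph (assumption: `metagraph.I` maps uid to incentive, as in bittensor):

```python
class FakeMetagraph:
    uids = [0, 1, 2, 3]
    I = {0: 0.1, 1: 0.9, 2: 0.5, 3: 0.7}

pairs = sorted(
    ((uid, FakeMetagraph.I[uid]) for uid in FakeMetagraph.uids),
    key=lambda p: p[1],
    reverse=True,
)
top_2 = [uid for uid, _ in pairs[:2]]
assert top_2 == [1, 3]
```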