text
stringlengths
5
22M
id
stringlengths
12
177
metadata
dict
__index_level_0__
int64
0
1.37k
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import requests from .auth import AppCredentials class EmulatorApiClient: @staticmethod async def emulate_oauth_cards( credentials: AppCredentials, emulator_url: str, emulate: bool ) -> bool: token = await credentials.get_token() request_url = ( emulator_url + ("" if emulator_url[-1] == "/" else "/") + f"api/usertoken/emulateOAuthCards?emulate={ str(emulate).lower() }" ) res = requests.post(request_url, headers={"Authorization": f"Bearer { token }"}) if res.status_code == 200: return True raise Exception( f"EmulateOAuthCards failed with status code: { res.status_code }" )
botbuilder-python/libraries/botframework-connector/botframework/connector/emulator_api_client.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/emulator_api_client.py", "repo_id": "botbuilder-python", "token_count": 335 }
394
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from ._configuration import TokenApiClientConfiguration from ._token_api_client import TokenApiClient __all__ = ["TokenApiClient", "TokenApiClientConfiguration"] from .version import VERSION __version__ = VERSION
botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/__init__.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/__init__.py", "repo_id": "botbuilder-python", "token_count": 110 }
395
interactions: - request: body: '{"from": {"id": "B21UTEF8S:T03CWQ0QB"}, "type": "message", "recipient": {"id": "U19KH8EHJ:T03CWQ0QB"}, "text": "Activity to be deleted..", "channelId": "slack"}' headers: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['160'] Content-Type: [application/json; charset=utf-8] User-Agent: [python/3.5.3 (Linux-4.11.0-041100-generic-x86_64-with-Ubuntu-17.04-zesty) requests/2.18.1 msrest/0.4.23 azure-botframework-connector/v3.0] method: POST uri: https://slack.botframework.com/v3/conversations/B21UTEF8S%3AT03CWQ0QB%3AD2369CT7C/activities response: body: {string: "{\r\n \"id\": \"1514312541.000062\"\r\n}"} headers: cache-control: [no-cache] content-length: ['33'] content-type: [application/json; charset=utf-8] date: ['Tue, 26 Dec 2017 18:22:21 GMT'] expires: ['-1'] pragma: [no-cache] request-context: ['appId=cid-v1:6814484e-c0d5-40ea-9dba-74ff29ca4f62'] server: [Microsoft-IIS/10.0] strict-transport-security: [max-age=31536000] vary: [Accept-Encoding] x-powered-by: [ASP.NET] status: {code: 200, message: OK} - request: body: null headers: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['0'] Content-Type: [application/json; charset=utf-8] User-Agent: [python/3.5.3 (Linux-4.11.0-041100-generic-x86_64-with-Ubuntu-17.04-zesty) requests/2.18.1 msrest/0.4.23 azure-botframework-connector/v3.0] method: DELETE uri: https://slack.botframework.com/v3/conversations/B21UTEF8S%3AT03CWQ0QB%3AD2369CT7C/activities/1514312541.000062 response: body: {string: '{}'} headers: cache-control: [no-cache] content-length: ['2'] content-type: [application/json; charset=utf-8] date: ['Tue, 26 Dec 2017 18:22:21 GMT'] expires: ['-1'] pragma: [no-cache] request-context: ['appId=cid-v1:6814484e-c0d5-40ea-9dba-74ff29ca4f62'] server: [Microsoft-IIS/10.0] strict-transport-security: [max-age=31536000] vary: [Accept-Encoding] x-powered-by: [ASP.NET] status: {code: 200, 
message: OK} version: 1
botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_delete_activity.yaml/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_delete_activity.yaml", "repo_id": "botbuilder-python", "token_count": 1106 }
396
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from asyncio import Event, ensure_future, iscoroutinefunction, isfuture from typing import Awaitable, Callable, List from botframework.streaming.transport import ( DisconnectedEventArgs, TransportSenderBase, TransportConstants, ) from botframework.streaming.payloads import HeaderSerializer from botframework.streaming.payloads.models import Header from .send_queue import SendQueue from .send_packet import SendPacket # TODO: consider interface this class class PayloadSender: def __init__(self): self._connected_event = Event() self._sender: TransportSenderBase = None self._is_disconnecting: bool = False self._send_header_buffer: List[int] = [ None ] * TransportConstants.MAX_HEADER_LENGTH self._send_content_buffer: List[int] = [ None ] * TransportConstants.MAX_PAYLOAD_LENGTH self._send_queue = SendQueue(action=self._write_packet) self.disconnected: Callable[[object, DisconnectedEventArgs], None] = None @property def is_connected(self) -> bool: return self._sender is not None def connect(self, sender: TransportSenderBase): if self._sender: raise RuntimeError(f"{self.__class__.__name__} instance already connected.") self._sender = sender self._connected_event.set() # TODO: check 'stream' for payload def send_payload( self, header: Header, payload: object, is_length_known: bool, sent_callback: Callable[[Header], Awaitable], ): packet = SendPacket( header=header, payload=payload, is_length_known=is_length_known, sent_callback=sent_callback, ) self._send_queue.post(packet) async def disconnect(self, event_args: DisconnectedEventArgs = None): did_disconnect = False if not self._is_disconnecting: self._is_disconnecting = True try: try: if self._sender: self._sender.close() # TODO: investigate if 'dispose' is necessary did_disconnect = True except Exception: pass self._sender = None if did_disconnect: self._connected_event.clear() if callable(self.disconnected): # pylint: 
disable=not-callable if iscoroutinefunction(self.disconnected) or isfuture( self.disconnected ): await self.disconnected( self, event_args or DisconnectedEventArgs.empty ) else: self.disconnected( self, event_args or DisconnectedEventArgs.empty ) finally: self._is_disconnecting = False async def _write_packet(self, packet: SendPacket): await self._connected_event.wait() try: # determine if we know the payload length and end if not packet.is_length_known: count = packet.header.payload_length packet.header.end = count == 0 header_length = HeaderSerializer.serialize( packet.header, self._send_header_buffer, 0 ) # Send: Packet Header length = await self._sender.send(self._send_header_buffer, 0, header_length) if not length: # TODO: make custom exception raise Exception("TransportDisconnectedException") offset = 0 # Send content in chunks if packet.header.payload_length and packet.payload: # If we already read the buffer, send that # If we did not, read from the stream until we've sent that amount if not packet.is_length_known: # Send: Packet content length = await self._sender.send( self._send_content_buffer, 0, packet.header.payload_length ) if length == 0: # TODO: make custom exception raise Exception("TransportDisconnectedException") else: while offset < packet.header.payload_length: count = min( packet.header.payload_length - offset, TransportConstants.MAX_PAYLOAD_LENGTH, ) # copy the stream to the buffer # TODO: this has to be improved in custom buffer class (validate buffer ended) for index in range(count): self._send_content_buffer[index] = packet.payload[index] # Send: Packet content length = await self._sender.send( self._send_content_buffer, 0, count ) if length == 0: # TODO: make custom exception raise Exception("TransportDisconnectedException") offset += count if packet.sent_callback: # TODO: should this really run in the background? 
ensure_future(packet.sent_callback(packet.header)) except Exception as exception: disconnected_args = DisconnectedEventArgs(reason=str(exception)) await self.disconnect(disconnected_args)
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/payload_sender.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/payload_sender.py", "repo_id": "botbuilder-python", "token_count": 2926 }
397
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from uuid import UUID from typing import List from botframework.streaming.transport import TransportConstants from .models import Header _CHAR_TO_BINARY_INT = {val.decode(): list(val)[0] for val in [b".", b"\n", b"1", b"0"]} # TODO: consider abstracting the binary int list logic into a class for easier handling class HeaderSerializer: DELIMITER = _CHAR_TO_BINARY_INT["."] TERMINATOR = _CHAR_TO_BINARY_INT["\n"] END = _CHAR_TO_BINARY_INT["1"] NOT_END = _CHAR_TO_BINARY_INT["0"] TYPE_OFFSET = 0 TYPE_DELIMITER_OFFSET = 1 LENGTH_OFFSET = 2 LENGTH_LENGTH = 6 LENGTH_DELIMETER_OFFSET = 8 ID_OFFSET = 9 ID_LENGTH = 36 ID_DELIMETER_OFFSET = 45 END_OFFSET = 46 TERMINATOR_OFFSET = 47 @staticmethod def serialize( header: Header, buffer: List[int], offset: int, # pylint: disable=unused-argument ) -> int: # write type buffer[HeaderSerializer.TYPE_OFFSET] = HeaderSerializer._char_to_binary_int( header.type ) buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] = HeaderSerializer.DELIMITER # write length length_binary_array: List[int] = list( HeaderSerializer._int_to_formatted_encoded_str( header.payload_length, "{:06d}" ) ) HeaderSerializer._write_in_buffer( length_binary_array, buffer, HeaderSerializer.LENGTH_OFFSET ) buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER # write id id_binary_array: List[int] = list( HeaderSerializer._uuid_to_numeric_encoded_str(header.id) ) HeaderSerializer._write_in_buffer( id_binary_array, buffer, HeaderSerializer.ID_OFFSET ) buffer[HeaderSerializer.ID_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER # write terminator buffer[HeaderSerializer.END_OFFSET] = ( HeaderSerializer.END if header.end else HeaderSerializer.NOT_END ) buffer[HeaderSerializer.TERMINATOR_OFFSET] = HeaderSerializer.TERMINATOR return TransportConstants.MAX_HEADER_LENGTH @staticmethod def deserialize( buffer: List[int], offset: int, count: int # pylint: 
disable=unused-argument ) -> Header: if count != TransportConstants.MAX_HEADER_LENGTH: raise ValueError("Cannot deserialize header, incorrect length") header = Header( type=HeaderSerializer._binary_int_to_char( buffer[HeaderSerializer.TYPE_OFFSET] ) ) if buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] != HeaderSerializer.DELIMITER: raise ValueError("Header type delimeter is malformed") length_str = HeaderSerializer._binary_array_to_str( buffer[ HeaderSerializer.LENGTH_OFFSET : HeaderSerializer.LENGTH_OFFSET + HeaderSerializer.LENGTH_LENGTH ] ) try: length = int(length_str) except Exception: raise ValueError("Header length is malformed") header.payload_length = length if ( buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET] != HeaderSerializer.DELIMITER ): raise ValueError("Header length delimeter is malformed") identifier_str = HeaderSerializer._binary_array_to_str( buffer[ HeaderSerializer.ID_OFFSET : HeaderSerializer.ID_OFFSET + HeaderSerializer.ID_LENGTH ] ) try: identifier = UUID(identifier_str) except Exception: raise ValueError("Header id is malformed") header.id = identifier if buffer[HeaderSerializer.ID_DELIMETER_OFFSET] != HeaderSerializer.DELIMITER: raise ValueError("Header id delimeter is malformed") if buffer[HeaderSerializer.END_OFFSET] not in [ HeaderSerializer.END, HeaderSerializer.NOT_END, ]: raise ValueError("Header end is malformed") header.end = buffer[HeaderSerializer.END_OFFSET] == HeaderSerializer.END if buffer[HeaderSerializer.TERMINATOR_OFFSET] != HeaderSerializer.TERMINATOR: raise ValueError("Header terminator is malformed") return header @staticmethod def _char_to_binary_int(char: str) -> int: if len(char) != 1: raise ValueError("Char to cast should be a str of exactly length 1") unicode_list = list(char.encode()) if len(unicode_list) != 1: raise ValueError("Char to cast should be in the ASCII domain") return unicode_list[0] @staticmethod def _int_to_formatted_encoded_str(value: int, str_format: str) -> bytes: return 
str_format.format(value).encode("ascii") @staticmethod def _uuid_to_numeric_encoded_str(value: UUID) -> bytes: return str(value).encode("ascii") @staticmethod def _binary_int_to_char(binary_int: int) -> str: return bytes([binary_int]).decode("ascii") @staticmethod def _binary_array_to_str(binary_array: List[int]) -> str: return bytes(binary_array).decode("ascii") @staticmethod def _write_in_buffer(data: List[int], buffer: List[int], insert_index: int): for byte_int in data: buffer[insert_index] = byte_int insert_index += 1
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py", "repo_id": "botbuilder-python", "token_count": 2454 }
398
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from abc import ABC from logging import Logger from .receive_request import ReceiveRequest from .streaming_response import StreamingResponse class RequestHandler(ABC): async def process_request( self, request: ReceiveRequest, logger: Logger, context: object ) -> StreamingResponse: raise NotImplementedError()
botbuilder-python/libraries/botframework-streaming/botframework/streaming/request_handler.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/request_handler.py", "repo_id": "botbuilder-python", "token_count": 122 }
399
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import traceback from typing import List from botframework.streaming.transport import TransportReceiverBase, TransportSenderBase from .web_socket import WebSocket from .web_socket_message_type import WebSocketMessageType from .web_socket_close_status import WebSocketCloseStatus from .web_socket_state import WebSocketState class WebSocketTransport(TransportReceiverBase, TransportSenderBase): def __init__(self, web_socket: WebSocket): self._socket = web_socket @property def is_connected(self): # TODO: mock logic return self._socket.status == WebSocketState.OPEN async def close(self): # TODO: mock logic if self._socket.status == WebSocketState.OPEN: try: await self._socket.close( WebSocketCloseStatus.NORMAL_CLOSURE, "Closed by the WebSocketTransport", ) except Exception: # pylint: disable=pointless-string-statement """ Any exception thrown here will be caused by the socket already being closed, which is the state we want to put it in by calling this method, which means we don't care if it was already closed and threw an exception when we tried to close it again. 
""" traceback.print_exc() # TODO: might need to remove offset and count if no segmentation possible # TODO: considering to create a BFTransportBuffer class to abstract the logic of binary buffers adapting to # current interfaces async def receive( self, buffer: List[int], offset: int = 0, count: int = None ) -> int: try: if self._socket: result = await self._socket.receive() buffer_index = offset result_length = count if count is not None else len(result.data) for result_index in range(result_length): buffer[buffer_index] = result.data[result_index] buffer_index += 1 if result.message_type == WebSocketMessageType.CLOSE: await self._socket.close( WebSocketCloseStatus.NORMAL_CLOSURE, "Socket closed" ) # Depending on ws implementation library next line might not be necessary if self._socket.status == WebSocketState.CLOSED: self._socket.dispose() return result_length except Exception as error: # Exceptions of the three types below will also have set the socket's state to closed, which fires an # event consumers of this class are subscribed to and have handling around. Any other exception needs to # be thrown to cause a non-transport-connectivity failure. raise error # TODO: might need to remove offset and count if no segmentation possible (or put them in BFTransportBuffer) async def send(self, buffer: List[int], offset: int = 0, count: int = None) -> int: try: if self._socket: await self._socket.send( buffer[offset:count] if count is not None else buffer, WebSocketMessageType.BINARY, True, ) return count or len(buffer) except Exception as error: # Exceptions of the three types below will also have set the socket's state to closed, which fires an # event consumers of this class are subscribed to and have handling around. Any other exception needs to # be thrown to cause a non-transport-connectivity failure. traceback.print_exc() raise error return 0
botbuilder-python/libraries/botframework-streaming/botframework/streaming/transport/web_socket/web_socket_transport.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/transport/web_socket/web_socket_transport.py", "repo_id": "botbuilder-python", "token_count": 1617 }
400
variables: # Container registry service connection established during pipeline creation CI_PULL_REQUEST: $(System.PullRequest.PullRequestId) COVERALLS_FLAG_NAME: Build \# $(Build.BuildNumber) COVERALLS_GIT_BRANCH: $(Build.SourceBranchName) COVERALLS_GIT_COMMIT: $(Build.SourceVersion) COVERALLS_SERVICE_JOB_ID: $(Build.BuildId) COVERALLS_SERVICE_NAME: python-ci python.38: 3.8 python.39: 3.9 python.310: 3.10 python.311: 3.11 # PythonCoverallsToken: get this from Azure jobs: # Build and publish container - job: Build #Multi-configuration and multi-agent job options are not exported to YAML. Configure these options using documentation guidance: https://docs.microsoft.com/vsts/pipelines/process/phases pool: vmImage: 'ubuntu-latest' strategy: matrix: Python38: PYTHON_VERSION: '$(python.38)' Python39: PYTHON_VERSION: '$(python.39)' Python310: PYTHON_VERSION: '$(python.310)' Python311: PYTHON_VERSION: '$(python.311)' maxParallel: 3 steps: - powershell: | Get-ChildItem env:* | sort-object name | Format-Table -Autosize -Wrap | Out-String -Width 120 displayName: 'Get environment vars' - task: UsePythonVersion@0 displayName: 'Use Python $(PYTHON_VERSION)' inputs: versionSpec: '$(PYTHON_VERSION)' - script: | python -m pip install --upgrade pip pip install -e ./libraries/botbuilder-schema pip install -e ./libraries/botframework-connector pip install -e ./libraries/botframework-streaming pip install -e ./libraries/botbuilder-core pip install -e ./libraries/botbuilder-ai pip install -e ./libraries/botbuilder-applicationinsights pip install -e ./libraries/botbuilder-dialogs pip install -e ./libraries/botbuilder-azure pip install -e ./libraries/botbuilder-testing pip install -e ./libraries/botbuilder-integration-applicationinsights-aiohttp pip install -e ./libraries/botbuilder-adapters-slack pip install -e ./libraries/botbuilder-integration-aiohttp pip install -r ./libraries/botframework-connector/tests/requirements.txt pip install -r 
./libraries/botbuilder-core/tests/requirements.txt pip install -r ./libraries/botbuilder-ai/tests/requirements.txt pip install coveralls pip install pylint==2.17 pip install black==22.3.0 displayName: 'Install dependencies' - script: 'black --check libraries' displayName: 'Check Black compliant' - script: 'pylint --rcfile=.pylintrc libraries' displayName: Pylint - script: | pip install pytest pip install pytest-cov pip install coveralls pytest --junitxml=junit/test-results.$(PYTHON_VERSION).xml --cov-config=.coveragerc --cov --cov-report=xml --cov-report=html --ignore=libraries/functional-tests/tests/test_slack_client.py displayName: Pytest - task: PublishCodeCoverageResults@1 displayName: 'Publish Test Coverage' inputs: codeCoverageTool: Cobertura summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' reportDirectory: '$(System.DefaultWorkingDirectory)/**/htmlcov' - task: PublishTestResults@2 displayName: 'Publish Test Results **/test-results.$(PYTHON_VERSION).xml' inputs: testResultsFiles: '**/test-results.$(PYTHON_VERSION).xml' testRunTitle: 'Python $(PYTHON_VERSION)' - script: 'COVERALLS_REPO_TOKEN=$(PythonCoverallsToken) coveralls' displayName: 'Push test results to coveralls https://coveralls.io/github/microsoft/botbuilder-python' continueOnError: true condition: and(succeeded(), eq(variables['System.PullRequest.IsFork'], 'false')) - powershell: | Set-Location .. Get-ChildItem -Recurse -Force displayName: 'Dir workspace' condition: succeededOrFailed() - powershell: | # This task copies the code coverage file created by dotnet test into a well known location. In all # checks I've done, dotnet test ALWAYS outputs the coverage file to the temp directory. # My attempts to override this and have it go directly to the CodeCoverage directory have # all failed, so I'm just doing the copy here. 
(cmullins) Get-ChildItem -Path "$(Build.SourcesDirectory)" -Include "*coverage*" | Copy-Item -Destination "$(Build.ArtifactStagingDirectory)/CodeCoverage" displayName: 'Copy .coverage Files to CodeCoverage folder' continueOnError: true - task: ms.vss-governance-buildtask.governance-build-task-component-detection.ComponentGovernanceComponentDetection@0 displayName: 'Component Detection'
botbuilder-python/pipelines/botbuilder-python-ci.yml/0
{ "file_path": "botbuilder-python/pipelines/botbuilder-python-ci.yml", "repo_id": "botbuilder-python", "token_count": 1666 }
401
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import List from botbuilder.core import ChannelServiceHandler from botbuilder.schema import ( Activity, ChannelAccount, ConversationParameters, ConversationResourceResponse, ConversationsResult, PagedMembersResult, ResourceResponse ) from botframework.connector.aio import ConnectorClient from botframework.connector.auth import ( AuthenticationConfiguration, ChannelProvider, ClaimsIdentity, CredentialProvider, MicrosoftAppCredentials ) from routing_id_factory import RoutingIdFactory class RoutingHandler(ChannelServiceHandler): def __init__( self, conversation_id_factory: RoutingIdFactory, credential_provider: CredentialProvider, auth_configuration: AuthenticationConfiguration, channel_provider: ChannelProvider = None ): super().__init__(credential_provider, auth_configuration, channel_provider) self._factory = conversation_id_factory self._credentials = MicrosoftAppCredentials(None, None) async def on_reply_to_activity( self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str, activity: Activity, ) -> ResourceResponse: back_conversation_id, back_service_url = self._factory.get_conversation_info(conversation_id) connector_client = self._get_connector_client(back_service_url) activity.conversation.id = back_conversation_id activity.service_url = back_service_url return await connector_client.conversations.send_to_conversation(back_conversation_id, activity) async def on_send_to_conversation( self, claims_identity: ClaimsIdentity, conversation_id: str, activity: Activity, ) -> ResourceResponse: back_conversation_id, back_service_url = self._factory.get_conversation_info(conversation_id) connector_client = self._get_connector_client(back_service_url) activity.conversation.id = back_conversation_id activity.service_url = back_service_url return await connector_client.conversations.send_to_conversation(back_conversation_id, activity) async def 
on_update_activity( self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str, activity: Activity, ) -> ResourceResponse: back_conversation_id, back_service_url = self._factory.get_conversation_info(conversation_id) connector_client = self._get_connector_client(back_service_url) activity.conversation.id = back_conversation_id activity.service_url = back_service_url return await connector_client.conversations.update_activity(back_conversation_id, activity.id, activity) async def on_delete_activity( self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str, ): back_conversation_id, back_service_url = self._factory.get_conversation_info(conversation_id) connector_client = self._get_connector_client(back_service_url) return await connector_client.conversations.delete_activity(back_conversation_id, activity_id) async def on_create_conversation( self, claims_identity: ClaimsIdentity, parameters: ConversationParameters, ) -> ConversationResourceResponse: # This call will be used in Teams scenarios. # Scenario #1 - creating a thread with an activity in a Channel in a Team # In order to know the serviceUrl in the case of Teams we would need to look it up based upon the # TeamsChannelData. # The inbound activity will contain the TeamsChannelData and so will the ConversationParameters. 
# Scenario #2 - starting a one on one conversation with a particular user # - needs further analysis - back_service_url = "http://tempuri" connector_client = self._get_connector_client(back_service_url) return await connector_client.conversations.create_conversation(parameters) async def on_delete_conversation_member( self, claims_identity: ClaimsIdentity, conversation_id: str, member_id: str, ): return await super().on_delete_conversation_member(claims_identity, conversation_id, member_id) async def on_get_activity_members( self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str, ) -> List[ChannelAccount]: return await super().on_get_activity_members(claims_identity, conversation_id, activity_id) async def on_get_conversation_members( self, claims_identity: ClaimsIdentity, conversation_id: str, ) -> List[ChannelAccount]: return await super().on_get_conversation_members(claims_identity, conversation_id) async def on_get_conversations( self, claims_identity: ClaimsIdentity, continuation_token: str = "", ) -> ConversationsResult: return await super().on_get_conversations(claims_identity, continuation_token) async def on_get_conversation_paged_members( self, claims_identity: ClaimsIdentity, conversation_id: str, page_size: int = None, continuation_token: str = "", ) -> PagedMembersResult: return await super().on_get_conversation_paged_members(claims_identity, conversation_id, continuation_token) def _get_connector_client(self, service_url: str): return ConnectorClient(self._credentials, service_url)
botbuilder-python/tests/experimental/test-protocol/routing_handler.py/0
{ "file_path": "botbuilder-python/tests/experimental/test-protocol/routing_handler.py", "repo_id": "botbuilder-python", "token_count": 1984 }
402
#!/bin/bash set -e echo "Starting SSH ..." service ssh start # flask run --port 3978 --host 0.0.0.0 python /functionaltestbot/app.py --host 0.0.0.0
botbuilder-python/tests/functional-tests/functionaltestbot/init.sh/0
{ "file_path": "botbuilder-python/tests/functional-tests/functionaltestbot/init.sh", "repo_id": "botbuilder-python", "token_count": 58 }
403
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ainThreedots-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="ain-ar.fina"/> <component base="threedotsupabove-ar" xOffset="10" yOffset="273"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 168 }
404
<?xml version='1.0' encoding='UTF-8'?> <glyph name="cedilla" format="2"> <advance width="1200"/> <unicode hex="00B8"/> <outline> <component base="cedillacomb"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>cedillacomb</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/cedilla.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/cedilla.glif", "repo_id": "cascadia-code", "token_count": 275 }
405
<?xml version='1.0' encoding='UTF-8'?> <glyph name="divisionslash" format="2"> <advance width="1200"/> <unicode hex="2215"/> <outline> <component base="slash" xOffset="2"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>slash</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/divisionslash.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/divisionslash.glif", "repo_id": "cascadia-code", "token_count": 278 }
406
<?xml version='1.0' encoding='UTF-8'?> <glyph name="hahTahTwodotshorizontalabove-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="hah-ar.fina"/> <component base="twodotstahcenter-ar" xScale="0.9" yScale="0.9" xOffset="73" yOffset="-330"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.glyph.leftMetricsKey</key> <string>hah-ar.fina</string> <key>com.schriftgestaltung.Glyphs.glyph.rightMetricsKey</key> <string>hah-ar.fina</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hahT_ahT_wodotshorizontalabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hahT_ahT_wodotshorizontalabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 294 }
407
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehTwodotshorizontalabove-ar.fina" format="2"> <advance width="1200"/> <anchor x="625" y="1494" name="top"/> <outline> <component base="keheh-ar.fina"/> <component base="twodotshorizontalabove-ar.v2" xScale="0.7" yScale="0.7" xOffset="206" yOffset="750"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 213 }
408
<?xml version='1.0' encoding='UTF-8'?> <glyph name="lamDotabove-ar.init" format="2"> <advance width="1200"/> <guideline x="305" y="819" angle="0"/> <outline> <component base="lam-ar.init"/> <component base="dotabove-ar" xOffset="234" yOffset="925"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.init.glif", "repo_id": "cascadia-code", "token_count": 182 }
409
<?xml version='1.0' encoding='UTF-8'?> <glyph name="meemDotabove-ar.fina" format="2"> <advance width="1200"/> <guideline x="254" y="551" angle="0"/> <outline> <component base="meem-ar.fina"/> <component base="dotabove-ar" xOffset="43" yOffset="262"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/meemD_otabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/meemD_otabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 185 }
410
<?xml version='1.0' encoding='UTF-8'?> <glyph name="oslash" format="2"> <advance width="1200"/> <unicode hex="00F8"/> <outline> <contour> <point x="301" y="-134" type="line"/> <point x="1036" y="1116" type="line"/> <point x="896" y="1198" type="line"/> <point x="171" y="-42" type="line"/> </contour> <component base="o"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>o</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/oslash.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/oslash.glif", "repo_id": "cascadia-code", "token_count": 372 }
411
<?xml version='1.0' encoding='UTF-8'?> <glyph name="period_period.liga" format="2"> <advance width="1200"/> <outline> <component base="period" xOffset="140"/> <component base="period" xOffset="1060"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>period</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>period</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/period_period.liga.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/period_period.liga.glif", "repo_id": "cascadia-code", "token_count": 410 }
412
<?xml version='1.0' encoding='UTF-8'?> <glyph name="questiongreek" format="2"> <advance width="1200"/> <unicode hex="037E"/> <outline> <component base="semicolon"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>semicolon</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/questiongreek.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/questiongreek.glif", "repo_id": "cascadia-code", "token_count": 276 }
413
<?xml version='1.0' encoding='UTF-8'?> <glyph name="threequarters" format="2"> <advance width="1200"/> <unicode hex="00BE"/> <outline> <contour> <point x="211" y="269" type="line"/> <point x="1121" y="969" type="line"/> <point x="989" y="1143" type="line"/> <point x="79" y="449" type="line"/> </contour> <component base="foursuperior" xOffset="240" yOffset="-802"/> <component base="threesuperior" xOffset="-240"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>foursuperior</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>threesuperior</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/threequarters.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/threequarters.glif", "repo_id": "cascadia-code", "token_count": 531 }
414
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Endescender-cy" format="2"> <advance width="1200"/> <unicode hex="04A2"/> <outline> <contour> <point x="998" y="-319" type="line"/> <point x="1044" y="-188"/> <point x="1111" y="97"/> <point x="1119" y="244" type="curve"/> <point x="885" y="244" type="line"/> <point x="850" y="46" type="line"/> <point x="832" y="-42"/> <point x="803" y="-147"/> <point x="762" y="-270" type="curve"/> </contour> <component base="En-cy"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>En-cy</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-BoldItalic.ufo/glyphs/E_ndescender-cy.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-BoldItalic.ufo/glyphs/E_ndescender-cy.glif", "repo_id": "cascadia-code", "token_count": 468 }
415
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Ycircumflex" format="2"> <advance width="1200"/> <unicode hex="0176"/> <outline> <component base="Y"/> <component base="circumflexcomb.case" xOffset="-5"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/Y_circumflex.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/Y_circumflex.glif", "repo_id": "cascadia-code", "token_count": 98 }
416
<?xml version='1.0' encoding='UTF-8'?> <glyph name="eight-persiansuperior" format="2"> <advance width="1200"/> <outline> <component base="eight-arsuperior" xOffset="6"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>eight-arsuperior</string> </dict> </array> <key>com.schriftgestaltung.Glyphs.category</key> <string>Number</string> <key>com.schriftgestaltung.Glyphs.script</key> <string>arabic</string> <key>com.schriftgestaltung.Glyphs.subCategory</key> <string>Small</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/eight-persiansuperior.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/eight-persiansuperior.glif", "repo_id": "cascadia-code", "token_count": 438 }
417
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gershayim-hb" format="2"> <advance width="1200"/> <unicode hex="05F4"/> <guideline x="1042" y="834" angle="90"/> <outline> <component base="geresh-hb" xOffset="197"/> <component base="geresh-hb" xOffset="-250"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.97,1,0,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/gershayim-hb.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/gershayim-hb.glif", "repo_id": "cascadia-code", "token_count": 191 }
418
<?xml version='1.0' encoding='UTF-8'?> <glyph name="hahHamzaabove-ar" format="2"> <advance width="1200"/> <unicode hex="0681"/> <outline> <component base="hah-ar"/> <component base="hamzaabove-ar" xOffset="-54" yOffset="-200"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/hahH_amzaabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/hahH_amzaabove-ar.glif", "repo_id": "cascadia-code", "token_count": 174 }
419
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tchehDotabove-ar" format="2"> <advance width="1200"/> <unicode hex="06BF"/> <outline> <component base="tcheh-ar"/> <component base="dotabove-ar" xOffset="-34" yOffset="362"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.glyph.leftMetricsKey</key> <string>tcheh-ar</string> <key>com.schriftgestaltung.Glyphs.glyph.rightMetricsKey</key> <string>tcheh-ar</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.glif", "repo_id": "cascadia-code", "token_count": 272 }
420
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Ecircumflexgrave" format="2"> <advance width="1200"/> <unicode hex="1EC0"/> <outline> <component base="E"/> <component base="circumflexcomb.case" xOffset="40"/> <component base="gravecomb.case" xOffset="427" yOffset="248"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/E_circumflexgrave.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/E_circumflexgrave.glif", "repo_id": "cascadia-code", "token_count": 122 }
421
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Ocircumflexgrave" format="2"> <advance width="1200"/> <unicode hex="1ED2"/> <outline> <component base="O"/> <component base="circumflexcomb.case"/> <component base="gravecomb.case" xOffset="387" yOffset="248"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/O_circumflexgrave.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/O_circumflexgrave.glif", "repo_id": "cascadia-code", "token_count": 117 }
422
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Omacronacute" format="2"> <advance width="1200"/> <unicode hex="1E52"/> <outline> <component base="O"/> <component base="macroncomb.case" xOffset="2"/> <component base="acutecomb.case" xOffset="56" yOffset="308"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/O_macronacute.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/O_macronacute.glif", "repo_id": "cascadia-code", "token_count": 123 }
423
<?xml version='1.0' encoding='UTF-8'?> <glyph name="abrevehookabove" format="2"> <advance width="1200"/> <unicode hex="1EB3"/> <outline> <component base="a"/> <component base="brevecomb" xOffset="30"/> <component base="hookabovecomb" xOffset="115" yOffset="480"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/abrevehookabove.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/abrevehookabove.glif", "repo_id": "cascadia-code", "token_count": 117 }
424
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ecircumflexhookabove" format="2"> <advance width="1200"/> <unicode hex="1EC3"/> <anchor x="506" y="0" name="bottom"/> <outline> <component base="e"/> <component base="circumflexcomb" xOffset="15"/> <component base="hookabovecomb" xOffset="373" yOffset="300"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/ecircumflexhookabove.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/ecircumflexhookabove.glif", "repo_id": "cascadia-code", "token_count": 137 }
425
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gje-cy" format="2"> <advance width="1200"/> <unicode hex="0453"/> <outline> <component base="iacute"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/gje-cy.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/gje-cy.glif", "repo_id": "cascadia-code", "token_count": 78 }
426
<?xml version='1.0' encoding='UTF-8'?> <glyph name="less_bar_bar_bar.liga" format="2"> <advance width="1200"/> <outline> <component base="bar" xOffset="2270"/> <component base="bar" xOffset="3239"/> <component base="less_bar.liga" xOffset="141"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/less_bar_bar_bar.liga.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/less_bar_bar_bar.liga.glif", "repo_id": "cascadia-code", "token_count": 113 }
427
<?xml version='1.0' encoding='UTF-8'?> <glyph name="oacute.loclPLK" format="2"> <advance width="1200"/> <outline> <component base="o"/> <component base="acutecomb.loclPLK"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/oacute.loclP_L_K_.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/oacute.loclP_L_K_.glif", "repo_id": "cascadia-code", "token_count": 89 }
428
<?xml version='1.0' encoding='UTF-8'?> <glyph name="seven.dnom" format="2"> <advance width="1200"/> <outline> <component base="sevensuperior" xOffset="-141" yOffset="-802"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/seven.dnom.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/seven.dnom.glif", "repo_id": "cascadia-code", "token_count": 81 }
429
<?xml version='1.0' encoding='UTF-8'?> <glyph name="shha-cy.alt" format="2"> <advance width="1200"/> <outline> <component base="h.alt"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.97,1,0,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/shha-cy.alt.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/shha-cy.alt.glif", "repo_id": "cascadia-code", "token_count": 132 }
430
<?xml version='1.0' encoding='UTF-8'?> <glyph name="threequarters" format="2"> <advance width="1200"/> <unicode hex="00BE"/> <outline> <contour> <point x="168" y="284" type="line"/> <point x="1188" y="1012" type="line"/> <point x="1124" y="1122" type="line"/> <point x="103" y="400" type="line"/> </contour> <component base="threesuperior" xOffset="-260"/> <component base="fourinferior" xOffset="270"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/threequarters.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/threequarters.glif", "repo_id": "cascadia-code", "token_count": 202 }
431
<?xml version='1.0' encoding='UTF-8'?> <glyph name="zero.dnom" format="2"> <advance width="1200"/> <outline> <component base="zerosuperior" xOffset="-143" yOffset="-812"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/zero.dnom.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Italic.ufo/glyphs/zero.dnom.glif", "repo_id": "cascadia-code", "token_count": 83 }
432
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ainTwodotshorizontalabove-ar" format="2"> <advance width="1200"/> <unicode hex="075D"/> <outline> <component base="ain-ar"/> <component base="twodotshorizontalabove-ar" xOffset="-93" yOffset="466"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ainT_wodotshorizontalabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ainT_wodotshorizontalabove-ar.glif", "repo_id": "cascadia-code", "token_count": 359 }
433
<?xml version='1.0' encoding='UTF-8'?> <glyph name="dahal-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="dal-ar.fina"/> <component base="twodotshorizontalabove-ar" xOffset="36" yOffset="492"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/dahal-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/dahal-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 349 }
434
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gueh-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="gaf-ar.fina"/> <component base="twodotsverticalbelow-ar" xOffset="-47" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotsverticalbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gueh-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gueh-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 349 }
435
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehThreedotsbelow-ar" format="2"> <advance width="1200"/> <unicode hex="063C"/> <outline> <component base="keheh-ar"/> <component base="threedotsdownbelow-ar" xOffset="-45" yOffset="-18"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsdownbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/kehehT_hreedotsbelow-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/kehehT_hreedotsbelow-ar.glif", "repo_id": "cascadia-code", "token_count": 358 }
436
<?xml version='1.0' encoding='UTF-8'?> <glyph name="noon-ar" format="2"> <advance width="1200"/> <unicode hex="0646"/> <outline> <component base="noonghunna-ar"/> <component base="dotabove-ar" xOffset="4" yOffset="11"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/noon-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/noon-ar.glif", "repo_id": "cascadia-code", "token_count": 344 }
437
<?xml version='1.0' encoding='UTF-8'?> <glyph name="noonRing-ar.init.alt" format="2"> <advance width="1200"/> <anchor x="0" y="0" name="overlap"/> <outline> <component base="yehKashmiri-ar.init.alt"/> <component base="dotabove-ar" xOffset="275" yOffset="335"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>1</integer> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/noonR_ing-ar.init.alt.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/noonR_ing-ar.init.alt.glif", "repo_id": "cascadia-code", "token_count": 398 }
438
<?xml version='1.0' encoding='UTF-8'?> <glyph name="qaf-ar" format="2"> <advance width="1200"/> <unicode hex="0642"/> <outline> <component base="qafDotless-ar"/> <component base="twodotshorizontalabove-ar" xOffset="213" yOffset="224"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qaf-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qaf-ar.glif", "repo_id": "cascadia-code", "token_count": 356 }
439
<?xml version='1.0' encoding='UTF-8'?> <glyph name="qafDotbelow-ar.init" format="2"> <advance width="1200"/> <outline> <component base="qaf-ar.init"/> <component base="dotbelow-ar" xOffset="113" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qafD_otbelow-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qafD_otbelow-ar.init.glif", "repo_id": "cascadia-code", "token_count": 341 }
440
<?xml version='1.0' encoding='UTF-8'?> <glyph name="sadThreedotsbelow-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="sad-ar.medi"/> <component base="threedotsdownbelow-ar" xOffset="52" yOffset="-18"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsdownbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/sadT_hreedotsbelow-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/sadT_hreedotsbelow-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 348 }
441
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tahThreedots-ar" format="2"> <advance width="1200"/> <unicode hex="069F"/> <outline> <component base="tah-ar"/> <component base="threedotsupabove-ar" xOffset="160" yOffset="343"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsupabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/tahT_hreedots-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/tahT_hreedots-ar.glif", "repo_id": "cascadia-code", "token_count": 353 }
442
<?xml version='1.0' encoding='UTF-8'?> <glyph name="uni08B4.medi" format="2"> <advance width="1200"/> <outline> <component base="kaf-ar.medi"/> <component base="dotbelow-ar" xOffset="47" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotbelow-ar</string> </dict> </array> <key>com.schriftgestaltung.Glyphs.category</key> <string>Letter</string> <key>com.schriftgestaltung.Glyphs.script</key> <string>arabic</string> <key>com.schriftgestaltung.Glyphs.subCategory</key> <string>Other</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/uni08B_4.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/uni08B_4.medi.glif", "repo_id": "cascadia-code", "token_count": 458 }
443
<?xml version='1.0' encoding='UTF-8'?> <glyph name="yu-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="waw-ar.fina"/> <component base="alefabove-ar" xOffset="50" yOffset="356"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>alefabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/yu-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/yu-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 340 }
444
feature case { # automatic sub dieresiscomb by dieresiscomb.case; sub dotaccentcomb by dotaccentcomb.case; sub gravecomb by gravecomb.case; sub acutecomb by acutecomb.case; sub hungarumlautcomb by hungarumlautcomb.case; sub circumflexcomb by circumflexcomb.case; sub caroncomb by caroncomb.case; sub brevecomb by brevecomb.case; sub ringcomb by ringcomb.case; sub tildecomb by tildecomb.case; sub macroncomb by macroncomb.case; sub hookabovecomb by hookabovecomb.case; sub horncomb by horncomb.case; sub tonos by tonos.case; sub brevecomb-cy by brevecomb-cy.case; } case;
cascadia-code/sources/features/case.fea/0
{ "file_path": "cascadia-code/sources/features/case.fea", "repo_id": "cascadia-code", "token_count": 201 }
445
feature salt { sub f by f.salt; sub florin by f; sub l by l.salt; sub lacute by lacute.salt; sub lbar by lbar.salt; sub lbelt by lbelt.salt; sub lcaron by lcaron.salt; sub lcommaaccent by lcommaaccent.salt; sub ldot by ldot.salt; sub ldotbelow by ldotbelow.salt; sub llinebelow by llinebelow.salt; sub lmiddletilde by lmiddletilde.salt; sub lslash by lslash.salt; sub r by r.salt; sub racute by racute.salt; sub rcaron by rcaron.salt; sub rcommaaccent by rcommaaccent.salt; sub rdotbelow by rdotbelow.salt; sub s by s.salt; sub sacute by sacute.salt; sub scaron by scaron.salt; sub scedilla by scedilla.salt; sub scircumflex by scircumflex.salt; sub scommaaccent by scommaaccent.salt; sub sdotbelow by sdotbelow.salt; sub ve-cy by ve-cy.salt; sub ze-cy by ze-cy.salt; sub che-cy by che-cy.salt; sub chedescender-cy by chedescender-cy.salt; } salt;
cascadia-code/sources/features/salt.fea/0
{ "file_path": "cascadia-code/sources/features/salt.fea", "repo_id": "cascadia-code", "token_count": 374 }
446
<?xml version='1.0' encoding='UTF-8'?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>uniE0A0</key> <string>uniE_0A_0.glif</string> <key>uniE0A0.stypo</key> <string>uniE_0A_0.stypo.glif</string> <key>uniE0A1</key> <string>uniE_0A_1.glif</string> <key>uniE0A2</key> <string>uniE_0A_2.glif</string> <key>uniE0A3</key> <string>uniE_0A_3.glif</string> <key>uniE0B0</key> <string>uniE_0B_0.glif</string> <key>uniE0B0.stypo</key> <string>uniE_0B_0.stypo.glif</string> <key>uniE0B1</key> <string>uniE_0B_1.glif</string> <key>uniE0B1.stypo</key> <string>uniE_0B_1.stypo.glif</string> <key>uniE0B2</key> <string>uniE_0B_2.glif</string> <key>uniE0B2.stypo</key> <string>uniE_0B_2.stypo.glif</string> <key>uniE0B3</key> <string>uniE_0B_3.glif</string> <key>uniE0B3.stypo</key> <string>uniE_0B_3.stypo.glif</string> <key>uniE0B4</key> <string>uniE_0B_4.glif</string> <key>uniE0B4.stypo</key> <string>uniE_0B_4.stypo.glif</string> <key>uniE0B5</key> <string>uniE_0B_5.glif</string> <key>uniE0B5.stypo</key> <string>uniE_0B_5.stypo.glif</string> <key>uniE0B6</key> <string>uniE_0B_6.glif</string> <key>uniE0B6.stypo</key> <string>uniE_0B_6.stypo.glif</string> <key>uniE0B7</key> <string>uniE_0B_7.glif</string> <key>uniE0B7.stypo</key> <string>uniE_0B_7.stypo.glif</string> <key>uniE0B8</key> <string>uniE_0B_8.glif</string> <key>uniE0B8.stypo</key> <string>uniE_0B_8.stypo.glif</string> <key>uniE0B9</key> <string>uniE_0B_9.glif</string> <key>uniE0B9.stypo</key> <string>uniE_0B_9.stypo.glif</string> <key>uniE0BA</key> <string>uniE_0B_A_.glif</string> <key>uniE0BA.stypo</key> <string>uniE_0B_A_.stypo.glif</string> <key>uniE0BB</key> <string>uniE_0B_B_.glif</string> <key>uniE0BB.stypo</key> <string>uniE_0B_B_.stypo.glif</string> <key>uniE0BC</key> <string>uniE_0B_C_.glif</string> <key>uniE0BC.stypo</key> <string>uniE_0B_C_.stypo.glif</string> <key>uniE0BD</key> <string>uniE_0B_D_.glif</string> 
<key>uniE0BD.stypo</key> <string>uniE_0B_D_.stypo.glif</string> <key>uniE0BE</key> <string>uniE_0B_E_.glif</string> <key>uniE0BE.stypo</key> <string>uniE_0B_E_.stypo.glif</string> <key>uniE0BF</key> <string>uniE_0B_F_.glif</string> <key>uniE0BF.stypo</key> <string>uniE_0B_F_.stypo.glif</string> <key>uniE0C0</key> <string>uniE_0C_0.glif</string> <key>uniE0C0.stypo</key> <string>uniE_0C_0.stypo.glif</string> <key>uniE0C1</key> <string>uniE_0C_1.glif</string> <key>uniE0C1.stypo</key> <string>uniE_0C_1.stypo.glif</string> <key>uniE0C2</key> <string>uniE_0C_2.glif</string> <key>uniE0C2.stypo</key> <string>uniE_0C_2.stypo.glif</string> <key>uniE0C3</key> <string>uniE_0C_3.glif</string> <key>uniE0C3.stypo</key> <string>uniE_0C_3.stypo.glif</string> <key>uniE0C4</key> <string>uniE_0C_4.glif</string> <key>uniE0C4.stypo</key> <string>uniE_0C_4.stypo.glif</string> <key>uniE0C5</key> <string>uniE_0C_5.glif</string> <key>uniE0C5.stypo</key> <string>uniE_0C_5.stypo.glif</string> <key>uniE0C6</key> <string>uniE_0C_6.glif</string> <key>uniE0C6.stypo</key> <string>uniE_0C_6.stypo.glif</string> <key>uniE0C7</key> <string>uniE_0C_7.glif</string> <key>uniE0C7.stypo</key> <string>uniE_0C_7.stypo.glif</string> <key>uniE0C8</key> <string>uniE_0C_8.glif</string> <key>uniE0C8.stypo</key> <string>uniE_0C_8.stypo.glif</string> <key>uniE0CA</key> <string>uniE_0C_A_.glif</string> <key>uniE0CA.stypo</key> <string>uniE_0C_A_.stypo.glif</string> <key>uniE0CC</key> <string>uniE_0C_C_.glif</string> <key>uniE0CC.stypo</key> <string>uniE_0C_C_.stypo.glif</string> <key>uniE0CD</key> <string>uniE_0C_D_.glif</string> <key>uniE0CD.stypo</key> <string>uniE_0C_D_.stypo.glif</string> <key>uniE0CE</key> <string>uniE_0C_E_.glif</string> <key>uniE0CE.stypo</key> <string>uniE_0C_E_.stypo.glif</string> <key>uniE0CF</key> <string>uniE_0C_F_.glif</string> <key>uniE0CF.stypo</key> <string>uniE_0C_F_.stypo.glif</string> <key>uniE0D0</key> <string>uniE_0D_0.glif</string> <key>uniE0D1</key> 
<string>uniE_0D_1.glif</string> <key>uniE0D1.stypo</key> <string>uniE_0D_1.stypo.glif</string> <key>uniE0D2</key> <string>uniE_0D_2.glif</string> <key>uniE0D2.stypo</key> <string>uniE_0D_2.stypo.glif</string> <key>uniE0D4</key> <string>uniE_0D_4.glif</string> <key>uniE0D4.stypo</key> <string>uniE_0D_4.stypo.glif</string> </dict> </plist>
cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/contents.plist/0
{ "file_path": "cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/contents.plist", "repo_id": "cascadia-code", "token_count": 2858 }
447
<?xml version='1.0' encoding='UTF-8'?> <glyph name="uniE0B4.stypo" format="2"> <advance width="1200"/> <note> uniE0B4 </note> <outline> <contour> <point x="0" y="-480" type="line"/> <point x="723" y="-480"/> <point x="1200" y="-8"/> <point x="1200" y="710" type="curve" smooth="yes"/> <point x="1200" y="1427"/> <point x="723" y="1900"/> <point x="0" y="1900" type="curve"/> </contour> <contour> <point x="-100" y="-480" type="line"/> <point x="0" y="-480" type="line"/> <point x="0" y="1900" type="line"/> <point x="-100" y="1900" type="line"/> </contour> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.lastChange</key> <string>2021/10/18 18:01:26</string> <key>public.markColor</key> <string>0.67,0.95,0.38,1</string> </dict> </lib> </glyph>
cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_4.stypo.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_4.stypo.glif", "repo_id": "cascadia-code", "token_count": 444 }
448
<?xml version='1.0' encoding='UTF-8'?> <glyph name="uniE0BC.stypo" format="2"> <advance width="1200"/> <note> uniE0BC </note> <outline> <contour> <point x="0" y="-480" type="line"/> <point x="1200" y="1900" type="line"/> <point x="0" y="1900" type="line"/> </contour> <contour> <point x="0" y="-480" type="line"/> <point x="0" y="1900" type="line"/> <point x="-100" y="1900" type="line"/> <point x="-100" y="-480" type="line"/> </contour> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.lastChange</key> <string>2021/10/18 18:04:16</string> <key>public.markColor</key> <string>0.67,0.95,0.38,1</string> </dict> </lib> </glyph>
cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_C_.stypo.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_C_.stypo.glif", "repo_id": "cascadia-code", "token_count": 367 }
449
<?xml version='1.0' encoding='UTF-8'?> <glyph name="car-brake-hold" format="2"> <advance width="512"/> <unicode hex="F0D5E"/> <note> car-brake-hold </note> <outline> <contour> <point x="308" y="384"/> <point x="397" y="333"/> <point x="448" y="244"/> <point x="448" y="140"/> <point x="397" y="51"/> <point x="308" y="0"/> <point x="204" y="0"/> <point x="115" y="51"/> <point x="64" y="140"/> <point x="64" y="244"/> <point x="115" y="333"/> <point x="204" y="384"/> <point x="256" y="384" type="qcurve" smooth="yes"/> </contour> <contour> <point x="297" y="43"/> <point x="365" y="83"/> <point x="405" y="151"/> <point x="405" y="233"/> <point x="365" y="301"/> <point x="297" y="341"/> <point x="215" y="341"/> <point x="147" y="301"/> <point x="107" y="233"/> <point x="107" y="151"/> <point x="147" y="83"/> <point x="215" y="43"/> <point x="256" y="43" type="qcurve" smooth="yes"/> </contour> <contour> <point x="414" y="34" type="line"/> <point x="445" y="65"/> <point x="480" y="147"/> <point x="480" y="237"/> <point x="445" y="319"/> <point x="414" y="350" type="qcurve"/> <point x="437" y="373" type="line"/> <point x="473" y="338"/> <point x="512" y="244"/> <point x="512" y="140"/> <point x="473" y="46"/> <point x="437" y="11" type="qcurve"/> </contour> <contour> <point x="75" y="11" type="line"/> <point x="39" y="46"/> <point x="0" y="140"/> <point x="0" y="244"/> <point x="39" y="338"/> <point x="75" y="373" type="qcurve"/> <point x="98" y="350" type="line"/> <point x="67" y="319"/> <point x="32" y="237"/> <point x="32" y="147"/> <point x="67" y="65"/> <point x="98" y="34" type="qcurve"/> </contour> <contour> <point x="235" y="299" type="line"/> <point x="235" y="213" type="line"/> <point x="277" y="213" type="line"/> <point x="277" y="299" type="line"/> <point x="320" y="299" type="line"/> <point x="320" y="85" type="line"/> <point x="277" y="85" type="line"/> <point x="277" y="171" type="line"/> <point x="235" y="171" type="line"/> <point x="235" y="85" type="line"/> 
<point x="192" y="85" type="line"/> <point x="192" y="299" type="line"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/car-brake-hold.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/car-brake-hold.glif", "repo_id": "cascadia-code", "token_count": 1268 }
450
<?xml version='1.0' encoding='UTF-8'?> <glyph name="language-lua" format="2"> <advance width="512"/> <unicode hex="F08B1"/> <note> language-lua </note> <outline> <contour> <point x="260" y="341"/> <point x="327" y="314"/> <point x="378" y="263"/> <point x="405" y="196"/> <point x="405" y="124"/> <point x="378" y="57"/> <point x="327" y="6"/> <point x="260" y="-21"/> <point x="188" y="-21"/> <point x="121" y="6"/> <point x="70" y="57"/> <point x="43" y="124"/> <point x="43" y="196"/> <point x="70" y="263"/> <point x="121" y="314"/> <point x="188" y="341"/> <point x="224" y="341" type="qcurve" smooth="yes"/> </contour> <contour> <point x="310" y="171"/> <point x="341" y="202"/> <point x="341" y="246"/> <point x="310" y="277"/> <point x="266" y="277"/> <point x="235" y="246"/> <point x="235" y="202"/> <point x="266" y="171"/> <point x="288" y="171" type="qcurve" smooth="yes"/> </contour> <contour> <point x="438" y="405"/> <point x="469" y="374"/> <point x="469" y="330"/> <point x="438" y="299"/> <point x="394" y="299"/> <point x="363" y="330"/> <point x="363" y="374"/> <point x="394" y="405"/> <point x="416" y="405" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/language-lua.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/language-lua.glif", "repo_id": "cascadia-code", "token_count": 721 }
451
<?xml version='1.0' encoding='UTF-8'?> <glyph name="longitude" format="2"> <advance width="512"/> <unicode hex="F0F5A"/> <note> longitude </note> <outline> <contour> <point x="314" y="405"/> <point x="412" y="348"/> <point x="469" y="250"/> <point x="469" y="134"/> <point x="412" y="36"/> <point x="314" y="-21"/> <point x="198" y="-21"/> <point x="100" y="36"/> <point x="43" y="134"/> <point x="43" y="250"/> <point x="100" y="348"/> <point x="198" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/> </contour> <contour> <point x="160" y="106"/> <point x="160" y="278"/> <point x="201" y="354" type="qcurve"/> <point x="149" y="336"/> <point x="85" y="247"/> <point x="85" y="137"/> <point x="149" y="48"/> <point x="201" y="30" type="qcurve"/> </contour> <contour> <point x="282" y="59"/> <point x="310" y="146"/> <point x="310" y="238"/> <point x="282" y="325"/> <point x="256" y="363" type="qcurve"/> <point x="230" y="325"/> <point x="202" y="238"/> <point x="202" y="146"/> <point x="230" y="59"/> <point x="256" y="21" type="qcurve"/> </contour> <contour> <point x="364" y="47"/> <point x="427" y="137"/> <point x="427" y="247"/> <point x="363" y="336"/> <point x="311" y="354" type="qcurve"/> <point x="352" y="278"/> <point x="352" y="106"/> <point x="311" y="30" type="qcurve"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/longitude.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/longitude.glif", "repo_id": "cascadia-code", "token_count": 816 }
452
<?xml version='1.0' encoding='UTF-8'?> <glyph name="pan-left" format="2"> <advance width="512"/> <unicode hex="F0BB9"/> <note> pan-left </note> <outline> <contour> <point x="149" y="107" type="line"/> <point x="53" y="192" type="line"/> <point x="149" y="277" type="line"/> </contour> <contour> <point x="274" y="235"/> <point x="299" y="210"/> <point x="299" y="174"/> <point x="274" y="149"/> <point x="238" y="149"/> <point x="213" y="174"/> <point x="213" y="210"/> <point x="238" y="235"/> <point x="256" y="235" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/pan-left.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/pan-left.glif", "repo_id": "cascadia-code", "token_count": 327 }
453
<?xml version='1.0' encoding='UTF-8'?> <glyph name="smoke-detector-variant" format="2"> <advance width="512"/> <unicode hex="F180B"/> <note> smoke-detector-variant </note> <outline> <contour> <point x="210" y="363"/> <point x="131" y="317"/> <point x="85" y="238"/> <point x="85" y="146"/> <point x="131" y="67"/> <point x="210" y="21"/> <point x="302" y="21"/> <point x="381" y="67"/> <point x="427" y="146"/> <point x="427" y="238"/> <point x="381" y="317"/> <point x="302" y="363"/> <point x="256" y="363" type="qcurve" smooth="yes"/> </contour> <contour> <point x="314" y="405"/> <point x="412" y="348"/> <point x="469" y="250"/> <point x="469" y="134"/> <point x="412" y="36"/> <point x="314" y="-21"/> <point x="198" y="-21"/> <point x="100" y="36"/> <point x="43" y="134"/> <point x="43" y="250"/> <point x="100" y="348"/> <point x="198" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/> </contour> <contour> <point x="265" y="213"/> <point x="277" y="201"/> <point x="277" y="183"/> <point x="265" y="171"/> <point x="247" y="171"/> <point x="235" y="183"/> <point x="235" y="201"/> <point x="247" y="213"/> <point x="256" y="213" type="qcurve" smooth="yes"/> </contour> <contour> <point x="213" y="265" type="line"/> <point x="233" y="277"/> <point x="279" y="277"/> <point x="299" y="265" type="qcurve"/> <point x="284" y="250" type="line"/> <point x="270" y="256"/> <point x="242" y="256"/> <point x="229" y="250" type="qcurve"/> </contour> <contour> <point x="320" y="206"/> <point x="314" y="219" type="qcurve"/> <point x="329" y="235" type="line"/> <point x="341" y="215"/> <point x="341" y="169"/> <point x="329" y="149" type="qcurve"/> <point x="314" y="164" type="line"/> <point x="320" y="178"/> <point x="320" y="192" type="qcurve" smooth="yes"/> </contour> <contour> <point x="192" y="178"/> <point x="198" y="164" type="qcurve"/> <point x="183" y="149" type="line"/> <point x="171" y="169"/> <point x="171" y="215"/> <point x="183" y="235" type="qcurve"/> <point 
x="198" y="219" type="line"/> <point x="192" y="206"/> <point x="192" y="192" type="qcurve" smooth="yes"/> </contour> <contour> <point x="299" y="119" type="line"/> <point x="279" y="107"/> <point x="233" y="107"/> <point x="213" y="119" type="qcurve"/> <point x="229" y="134" type="line"/> <point x="242" y="128"/> <point x="270" y="128"/> <point x="284" y="134" type="qcurve"/> </contour> <contour> <point x="384" y="233"/> <point x="384" y="151"/> <point x="360" y="118" type="qcurve"/> <point x="345" y="133" type="line"/> <point x="363" y="160"/> <point x="363" y="224"/> <point x="345" y="251" type="qcurve"/> <point x="360" y="266" type="line"/> </contour> <contour> <point x="182" y="296" type="line"/> <point x="215" y="320"/> <point x="297" y="320"/> <point x="330" y="296" type="qcurve"/> <point x="315" y="281" type="line"/> <point x="288" y="299"/> <point x="224" y="299"/> <point x="197" y="281" type="qcurve"/> </contour> <contour> <point x="330" y="88" type="line"/> <point x="297" y="64"/> <point x="215" y="64"/> <point x="182" y="88" type="qcurve"/> <point x="197" y="103" type="line"/> <point x="224" y="85"/> <point x="288" y="85"/> <point x="315" y="103" type="qcurve"/> </contour> <contour> <point x="152" y="118" type="line"/> <point x="128" y="151"/> <point x="128" y="233"/> <point x="152" y="266" type="qcurve"/> <point x="167" y="251" type="line"/> <point x="149" y="224"/> <point x="149" y="160"/> <point x="167" y="133" type="qcurve"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/smoke-detector-variant.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/smoke-detector-variant.glif", "repo_id": "cascadia-code", "token_count": 2094 }
454
<?xml version='1.0' encoding='UTF-8'?> <glyph name="toggle-switch-outline" format="2"> <advance width="512"/> <unicode hex="F0A1A"/> <note> toggle-switch-outline </note> <outline> <contour> <point x="397" y="320"/> <point x="456" y="286"/> <point x="491" y="227"/> <point x="491" y="157"/> <point x="456" y="98"/> <point x="397" y="64"/> <point x="363" y="64" type="qcurve" smooth="yes"/> <point x="149" y="64" type="line" smooth="yes"/> <point x="115" y="64"/> <point x="56" y="98"/> <point x="21" y="157"/> <point x="21" y="227"/> <point x="56" y="286"/> <point x="115" y="320"/> <point x="149" y="320" type="qcurve" smooth="yes"/> <point x="363" y="320" type="line" smooth="yes"/> </contour> <contour> <point x="398" y="107"/> <point x="448" y="157"/> <point x="448" y="227"/> <point x="398" y="277"/> <point x="363" y="277" type="qcurve" smooth="yes"/> <point x="149" y="277" type="line" smooth="yes"/> <point x="114" y="277"/> <point x="64" y="227"/> <point x="64" y="157"/> <point x="114" y="107"/> <point x="149" y="107" type="qcurve" smooth="yes"/> <point x="363" y="107" type="line" smooth="yes"/> </contour> <contour> <point x="389" y="256"/> <point x="427" y="219"/> <point x="427" y="165"/> <point x="389" y="128"/> <point x="336" y="128"/> <point x="299" y="165"/> <point x="299" y="219"/> <point x="336" y="256"/> <point x="363" y="256" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/toggle-switch-outline.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/toggle-switch-outline.glif", "repo_id": "cascadia-code", "token_count": 810 }
455
<?xml version='1.0' encoding='UTF-8'?> <glyph name="wiper-wash-alert" format="2"> <advance width="512"/> <unicode hex="F18DF"/> <note> wiper-wash-alert </note> <outline> <contour> <point x="281" y="316"/> <point x="321" y="299" type="qcurve" smooth="yes"/> <point x="354" y="285"/> <point x="381" y="263" type="qcurve" smooth="yes"/> <point x="401" y="247"/> <point x="416" y="229" type="qcurve" smooth="yes"/> <point x="427" y="213" type="line"/> <point x="277" y="64" type="line"/> <point x="272" y="72" type="line" smooth="yes"/> <point x="266" y="81"/> <point x="258" y="88" type="qcurve" smooth="yes"/> <point x="247" y="98"/> <point x="235" y="102" type="qcurve"/> <point x="235" y="58" type="line"/> <point x="244" y="53"/> <point x="256" y="33"/> <point x="256" y="21" type="qcurve" smooth="yes"/> <point x="256" y="4"/> <point x="231" y="-21"/> <point x="196" y="-21"/> <point x="171" y="4"/> <point x="171" y="21" type="qcurve" smooth="yes"/> <point x="171" y="33"/> <point x="182" y="53"/> <point x="192" y="58" type="qcurve"/> <point x="192" y="102" type="line"/> <point x="180" y="98"/> <point x="169" y="88" type="qcurve" smooth="yes"/> <point x="161" y="81"/> <point x="154" y="72" type="qcurve" smooth="yes"/> <point x="149" y="64" type="line"/> <point x="0" y="213" type="line"/> <point x="11" y="229" type="line" smooth="yes"/> <point x="26" y="247"/> <point x="46" y="263" type="qcurve" smooth="yes"/> <point x="73" y="286"/> <point x="105" y="300" type="qcurve" smooth="yes"/> <point x="146" y="317"/> <point x="192" y="320" type="qcurve"/> <point x="192" y="329"/> <point x="189" y="339" type="qcurve" smooth="yes"/> <point x="185" y="355"/> <point x="175" y="365" type="qcurve"/> <point x="213" y="397" type="line"/> <point x="252" y="367" type="line"/> <point x="242" y="356"/> <point x="238" y="339" type="qcurve" smooth="yes"/> <point x="235" y="329"/> <point x="235" y="320" type="qcurve"/> </contour> <contour> <point x="192" y="275" type="line"/> <point x="112" y="270"/> 
<point x="58" y="215" type="qcurve"/> <point x="147" y="126" type="line"/> <point x="165" y="140"/> <point x="192" y="147" type="qcurve"/> </contour> <contour> <point x="369" y="215" type="line"/> <point x="347" y="237"/> <point x="319" y="252" type="qcurve" smooth="yes"/> <point x="281" y="271"/> <point x="235" y="275" type="qcurve"/> <point x="235" y="147" type="line"/> <point x="255" y="142"/> <point x="279" y="126" type="qcurve"/> </contour> <contour> <point x="322" y="427"/> <point x="275" y="427"/> <point x="256" y="420" type="qcurve"/> <point x="271" y="380" type="line"/> <point x="284" y="385"/> <point x="315" y="385"/> <point x="326" y="380" type="qcurve"/> <point x="341" y="420" type="line"/> </contour> <contour> <point x="405" y="332"/> <point x="402" y="346" type="qcurve" smooth="yes"/> <point x="395" y="374"/> <point x="378" y="393" type="qcurve"/> <point x="346" y="365" type="line"/> <point x="353" y="357"/> <point x="358" y="346" type="qcurve" smooth="yes"/> <point x="361" y="337"/> <point x="362" y="328" type="qcurve" smooth="yes"/> <point x="363" y="320" type="line"/> <point x="405" y="320" type="line"/> </contour> <contour> <point x="31" y="374"/> <point x="25" y="346" type="qcurve" smooth="yes"/> <point x="21" y="332"/> <point x="21" y="320" type="qcurve"/> <point x="64" y="320" type="line"/> <point x="65" y="328" type="line" smooth="yes"/> <point x="66" y="337"/> <point x="68" y="346" type="qcurve" smooth="yes"/> <point x="72" y="357"/> <point x="79" y="365" type="qcurve"/> <point x="47" y="393" type="line"/> </contour> <contour> <point x="151" y="427"/> <point x="105" y="427"/> <point x="85" y="420" type="qcurve"/> <point x="100" y="380" type="line"/> <point x="113" y="385"/> <point x="145" y="385"/> <point x="156" y="380" type="qcurve"/> <point x="171" y="420" type="line"/> </contour> <contour> <point x="469" y="171" type="line"/> <point x="512" y="171" type="line"/> <point x="512" y="299" type="line"/> <point x="469" y="299" type="line"/> 
<point x="469" y="192" type="line"/> </contour> <contour> <point x="469" y="128" type="line"/> <point x="512" y="128" type="line"/> <point x="512" y="85" type="line"/> <point x="469" y="85" type="line"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/wiper-wash-alert.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/wiper-wash-alert.glif", "repo_id": "cascadia-code", "token_count": 2412 }
456
<?xml version='1.0' encoding='UTF-8'?> <glyph name="color-mode" format="2"> <advance width="300"/> <unicode hex="EAC6"/> <note> color-mode </note> <outline> <contour> <point x="186" y="281"/> <point x="246" y="246"/> <point x="281" y="186"/> <point x="281" y="114"/> <point x="246" y="54"/> <point x="186" y="19"/> <point x="114" y="19"/> <point x="54" y="54"/> <point x="19" y="114"/> <point x="19" y="186"/> <point x="54" y="246"/> <point x="114" y="281"/> <point x="150" y="281" type="qcurve" smooth="yes"/> </contour> <contour> <point x="181" y="38"/> <point x="232" y="68"/> <point x="263" y="119"/> <point x="263" y="181"/> <point x="232" y="232"/> <point x="181" y="263"/> <point x="150" y="263" type="qcurve"/> <point x="150" y="38" type="line"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/codicon.ufo/glyphs/color-mode.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/codicon.ufo/glyphs/color-mode.glif", "repo_id": "cascadia-code", "token_count": 468 }
457
<?xml version='1.0' encoding='UTF-8'?> <glyph name="device-camera" format="2"> <advance width="300"/> <unicode hex="EADA"/> <note> device-camera </note> <outline> <contour> <point x="185" y="260" type="line"/> <point x="178" y="263" type="line"/> <point x="122" y="263" type="line"/> <point x="115" y="260" type="line"/> <point x="99" y="244" type="line"/> <point x="28" y="244" type="line"/> <point x="19" y="234" type="line"/> <point x="19" y="66" type="line"/> <point x="28" y="56" type="line"/> <point x="272" y="56" type="line"/> <point x="281" y="66" type="line"/> <point x="281" y="234" type="line"/> <point x="272" y="244" type="line"/> <point x="201" y="244" type="line"/> </contour> <contour> <point x="38" y="225" type="line"/> <point x="103" y="225" type="line"/> <point x="110" y="228" type="line"/> <point x="126" y="244" type="line"/> <point x="174" y="244" type="line"/> <point x="190" y="228" type="line"/> <point x="197" y="225" type="line"/> <point x="263" y="225" type="line"/> <point x="263" y="75" type="line"/> <point x="38" y="75" type="line"/> </contour> <contour> <point x="70" y="206"/> <point x="75" y="201"/> <point x="75" y="193"/> <point x="70" y="188"/> <point x="62" y="187"/> <point x="56" y="193"/> <point x="56" y="201"/> <point x="62" y="206"/> <point x="66" y="206" type="qcurve" smooth="yes"/> </contour> <contour> <point x="134" y="188"/> <point x="113" y="166"/> <point x="113" y="134"/> <point x="134" y="113"/> <point x="166" y="112"/> <point x="188" y="134"/> <point x="188" y="166"/> <point x="166" y="188"/> <point x="150" y="188" type="qcurve" smooth="yes"/> </contour> <contour> <point x="173" y="206"/> <point x="206" y="173"/> <point x="206" y="127"/> <point x="173" y="94"/> <point x="127" y="94"/> <point x="94" y="127"/> <point x="94" y="173"/> <point x="127" y="206"/> <point x="150" y="206" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/codicon.ufo/glyphs/device-camera.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/codicon.ufo/glyphs/device-camera.glif", "repo_id": "cascadia-code", "token_count": 1101 }
458
<?xml version='1.0' encoding='UTF-8'?> <glyph name="circle-expand" format="2"> <advance width="1200"/> <unicode hex="F0E96"/> <note> circle-expand </note> <outline> <contour> <point x="1180" y="1290" type="line"/> <point x="888" y="1290" type="line"/> <point x="1180" y="998" type="line"/> </contour> <contour> <point x="1180" y="130" type="line"/> <point x="1180" y="422" type="line"/> <point x="888" y="130" type="line"/> </contour> <contour> <point x="20" y="130" type="line"/> <point x="312" y="130" type="line"/> <point x="20" y="422" type="line"/> </contour> <contour> <point x="20" y="1290" type="line"/> <point x="20" y="998" type="line"/> <point x="312" y="1290" type="line"/> </contour> <contour> <point x="1055" y="971"/> <point x="960" y="1070" type="qcurve" smooth="yes"/> <point x="861" y="1165"/> <point x="600" y="1232"/> <point x="339" y="1165"/> <point x="145" y="971"/> <point x="78" y="710"/> <point x="145" y="449"/> <point x="339" y="255"/> <point x="600" y="188"/> <point x="861" y="255"/> <point x="1055" y="449"/> <point x="1122" y="710"/> </contour> <contour> <point x="956" y="506"/> <point x="878" y="432" type="qcurve" smooth="yes"/> <point x="804" y="354"/> <point x="600" y="303"/> <point x="396" y="354"/> <point x="244" y="506"/> <point x="193" y="710"/> <point x="244" y="914"/> <point x="396" y="1066"/> <point x="600" y="1117"/> <point x="804" y="1066"/> <point x="956" y="914"/> <point x="1007" y="710"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/circle-expand.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/circle-expand.glif", "repo_id": "cascadia-code", "token_count": 880 }
459
<?xml version='1.0' encoding='UTF-8'?> <glyph name="dolly" format="2"> <advance width="1200"/> <unicode hex="F0E9E"/> <note> dolly </note> <outline> <contour> <point x="676" y="247"/> <point x="627" y="229" type="qcurve" smooth="yes"/> <point x="581" y="212"/> <point x="490" y="255"/> <point x="452" y="350"/> <point x="495" y="441"/> <point x="593" y="476"/> <point x="684" y="433"/> <point x="719" y="338"/> </contour> <contour> <point x="596" y="608"/> <point x="501" y="573" type="qcurve" smooth="yes"/> <point x="407" y="539"/> <point x="321" y="355"/> <point x="389" y="164"/> <point x="576" y="80"/> <point x="765" y="149"/> <point x="851" y="333"/> <point x="782" y="522"/> </contour> <contour> <point x="954" y="934" type="line"/> <point x="871" y="1163" type="line"/> <point x="470" y="1017" type="line"/> <point x="553" y="788" type="line"/> </contour> <contour> <point x="1180" y="496" type="line"/> <point x="1137" y="610" type="line"/> <point x="848" y="504" type="line"/> <point x="862" y="479"/> <point x="882" y="421"/> <point x="888" y="390" type="qcurve"/> </contour> <contour> <point x="1174" y="691" type="line"/> <point x="1091" y="920" type="line"/> <point x="576" y="731" type="line"/> <point x="604" y="650" type="line"/> <point x="722" y="642"/> <point x="808" y="556" type="qcurve"/> </contour> <contour> <point x="295" y="1321" type="line"/> <point x="20" y="1321" type="line"/> <point x="20" y="1200" type="line"/> <point x="209" y="1200" type="line"/> <point x="427" y="605" type="line"/> <point x="450" y="619"/> <point x="510" y="642"/> <point x="541" y="645" type="qcurve"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/dolly.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/dolly.glif", "repo_id": "cascadia-code", "token_count": 970 }
460
<?xml version='1.0' encoding='UTF-8'?> <glyph name="google-cardboard" format="2"> <advance width="1200"/> <unicode hex="F02AE"/> <note> google-cardboard </note> <outline> <contour> <point x="1139" y="1087"/> <point x="1106" y="1087" type="qcurve" smooth="yes"/> <point x="88" y="1087" type="line" smooth="yes"/> <point x="61" y="1087"/> <point x="20" y="1044"/> <point x="20" y="1014" type="qcurve" smooth="yes"/> <point x="20" y="406" type="line" smooth="yes"/> <point x="20" y="376"/> <point x="61" y="333"/> <point x="91" y="333" type="qcurve" smooth="yes"/> <point x="369" y="333" type="line" smooth="yes"/> <point x="390" y="333"/> <point x="428" y="357"/> <point x="434" y="379" type="qcurve"/> <point x="516" y="581" type="line" smooth="yes"/> <point x="526" y="605"/> <point x="573" y="638"/> <point x="627" y="638"/> <point x="674" y="605"/> <point x="684" y="581" type="qcurve" smooth="yes"/> <point x="766" y="379" type="line" smooth="yes"/> <point x="774" y="357"/> <point x="810" y="333"/> <point x="829" y="333" type="qcurve" smooth="yes"/> <point x="1106" y="333" type="line" smooth="yes"/> <point x="1139" y="333"/> <point x="1180" y="376"/> <point x="1180" y="406" type="qcurve" smooth="yes"/> <point x="1180" y="1014" type="line" smooth="yes"/> <point x="1180" y="1044"/> </contour> <contour> <point x="377" y="589"/> <point x="322" y="589" type="qcurve" smooth="yes"/> <point x="271" y="589"/> <point x="194" y="668"/> <point x="194" y="777"/> <point x="268" y="856"/> <point x="377" y="856"/> <point x="450" y="777"/> <point x="450" y="668"/> </contour> <contour> <point x="929" y="589"/> <point x="878" y="589" type="qcurve" smooth="yes"/> <point x="823" y="589"/> <point x="750" y="668"/> <point x="750" y="777"/> <point x="823" y="856"/> <point x="929" y="856"/> <point x="1006" y="777"/> <point x="1006" y="668"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/google-cardboard.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/google-cardboard.glif", "repo_id": "cascadia-code", "token_count": 1071 }
461
<?xml version='1.0' encoding='UTF-8'?> <glyph name="parking" format="2"> <advance width="1200"/> <unicode hex="F03E3"/> <note> parking </note> <outline> <contour> <point x="720" y="788"/> <point x="657" y="788" type="qcurve" smooth="yes"/> <point x="402" y="788" type="line"/> <point x="402" y="1106" type="line"/> <point x="657" y="1106" type="line" smooth="yes"/> <point x="720" y="1106"/> <point x="813" y="1013"/> <point x="813" y="880"/> </contour> <contour> <point x="768" y="1420"/> <point x="639" y="1420" type="qcurve" smooth="yes"/> <point x="88" y="1420" type="line"/> <point x="88" y="0" type="line"/> <point x="402" y="0" type="line"/> <point x="402" y="473" type="line"/> <point x="639" y="473" type="line" smooth="yes"/> <point x="768" y="473"/> <point x="986" y="599"/> <point x="1112" y="817"/> <point x="1112" y="1076"/> <point x="986" y="1294"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/parking.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/parking.glif", "repo_id": "cascadia-code", "token_count": 510 }
462
<?xml version='1.0' encoding='UTF-8'?> <glyph name="eye" format="2"> <advance width="1200"/> <unicode hex="EA70"/> <note> eye </note> <outline> <contour> <point x="104" y="546" type="line"/> <point x="104" y="679"/> <point x="237" y="909"/> <point x="467" y="1042"/> <point x="733" y="1042"/> <point x="963" y="909"/> <point x="1100" y="679"/> <point x="1100" y="546" type="qcurve"/> <point x="1180" y="546" type="line"/> <point x="1180" y="701"/> <point x="1025" y="971"/> <point x="759" y="1126"/> <point x="441" y="1126"/> <point x="175" y="971"/> <point x="20" y="701"/> <point x="20" y="546" type="qcurve"/> </contour> <contour> <point x="352" y="440"/> <point x="494" y="294"/> <point x="706" y="294"/> <point x="848" y="440"/> <point x="848" y="648"/> <point x="706" y="794"/> <point x="494" y="794"/> <point x="352" y="648"/> <point x="352" y="546" type="qcurve" smooth="yes"/> </contour> <contour> <point x="436" y="613"/> <point x="436" y="546" type="qcurve" smooth="yes"/> <point x="436" y="475"/> <point x="534" y="378"/> <point x="666" y="378"/> <point x="768" y="475"/> <point x="768" y="613"/> <point x="666" y="710"/> <point x="534" y="710"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/eye.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/eye.glif", "repo_id": "cascadia-code", "token_count": 721 }
463
from typing import Optional, Union import numpy as np import torch import torchsde from pytorch_lightning import LightningModule from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities.types import STEP_OUTPUT from scotch.latent_learning.functional_relationship import SCOTCHFunctionalRelationships from scotch.latent_learning.graph_distribution import BernoulliDigraphDistributionModule from scotch.latent_learning.scotch_nns import ( DECIEmbedNNCoefficient, NeuralContextualDriftCoefficient, NeuralDiffusionCoefficient, NeuralTrajectoryGraphEncoder, ) from scotch.scotch_utils.graph_metrics import ( confusion_matrix_batched, f1_score, false_discovery_rate, true_positive_rate, ) from scotch.sdes.scotch_sdes import AugmentedSCOTCHSDE, SCOTCHPriorSDE, swap_t_and_batch_dimensions from sklearn.metrics import roc_auc_score from torch import Tensor, nn from causica.distributions import GibbsDAGPrior from causica.distributions.transforms import TensorToTensorDictTransform, shapes_to_slices class LinearScheduler: def __init__(self, iters, maxval=1.0): self._iters = max(1, iters) self._val = maxval / self._iters self._maxval = maxval def step(self): self._val = min(self._maxval, self._val + self._maxval / self._iters) @property def val(self): return self._val class SCOTCHModule(LightningModule): """PyTorch Lightning module implementing the graph-based continuous time series model.""" def __init__( self, embedding_size: int = 32, out_dim_g: int = 32, prior_sparsity_lambda: float = 0.05, gumbel_temp: float = 0.25, context_size: int = 64, hidden_size: int = 128, kl_anneal_iters: int = 1000, learning_rates: Optional[dict] = None, use_adjoint: bool = False, sde_method: str = "euler", dt: float = 1e-2, noise_scale: Union[float, Tensor] = 0.01, record_graph_logits: bool = True, lr_warmup_iters: int = 1, ignore_self_connections: bool = False, layer_norm: bool = False, res_connections: bool = False, deci_diffusion: bool = False, compute_auroc=True, 
add_diffusion_self_connections: bool = False, sigmoid_output: bool = False, ): """Initialize the SCOTCH module. Some of the arguments are similar to arguments of the DeciModule constructor, while others are specific to SCOTCH. Args: embedding_size, out_dim_g: Hyperparameters of the prior diffusion coefficient network, implemented by a DECIEmbedNN. prior_sparsity_lambda: Sparsity parameter for the prior directed graph distribution. gumbel_temp: Temperature for the Gumbel-Softmax distribution used to sample directed graphs during training. context_size: Dimension of the context vector. Given an observed trajectory, an encoder is used to predict this vector, which is then used as input to the NN modelling the posterior SDE (given trajectory). hidden_size: Size of hidden NN layers in the encoder and posterior SDE NN. kl_anneal_iters: Number of iterations to anneal the KL term for the SDE ELBO. learning_rates: Learning rates for different parts of the components of the model. Keys are ["graph", "qz0_mean_net", "qz0_logstd_net", "pz0_mean", "pz0_logstd", "prior_drift_fn", "diffusion_fn", "posterior_drift_fn", "trajectory_encoder"]. use_adjoint: Whether to use the adjoint method for computing gradients through the (augmented) SDE during training. sde_method: Method to use for solving SDE. dt: Time step to use for solving SDE. noise_scale: Noise scale of the (Laplace) noise distribution for the observed trajectory given the latent trajectory. graph_constrained_posterior: Whether to use the constrain the posterior distribution to respect graphs (in the same manner as the prior); if True, we use a DECIEmbedNN for the posterior record_graph_logits: Whether to record the logits of the graph distribution in the metrics. lr_warmup_iters: Number of iterations to linearly increase the learning rate from 0 to the specified value. 
ignore_self_connections: If True, the diagonal logits of the predicted adjacency matrix are set to -inf during computation of graph metrics (auroc, f1, tpr, fdr) only. layer_norm: Whether to activate layer_norm for DECI neural networks res_connections: Whether to add residual connections for DECI neural networks deci_diffusion: whether to use DECI for diffusion coefficient (True), or independent nns for each dimension (False) compute_auroc: whether to compute auroc add_diffusion_self_connections: whether to add self-connections to the diffusion coefficient graph. only used if deci_diffusion is True. sigmoid_output: whether to apply sigmoid to the output of the diffusion coefficient nn. Only used if deci_diffusion """ super().__init__() self.embedding_size = embedding_size self.gumbel_temp = gumbel_temp self.is_setup = False self.out_dim_g = out_dim_g self.prior_sparsity_lambda = prior_sparsity_lambda self.context_size = context_size self.hidden_size = hidden_size self.kl_scheduler = LinearScheduler(kl_anneal_iters) if learning_rates is None: learning_rates = {"lambda": 3e-3} self.learning_rates = learning_rates self.use_adjoint = use_adjoint self.sde_method = sde_method self.dt = dt self.noise_scale = noise_scale self.record_graph_logits = record_graph_logits self.lr_warmup_iters = lr_warmup_iters self.ignore_self_connections = ignore_self_connections self.layer_norm = layer_norm self.res_connections = res_connections self.deci_diffusion = deci_diffusion self.add_diffusion_self_connections = add_diffusion_self_connections self.compute_auroc = compute_auroc self.sigmoid_output = sigmoid_output def setup(self, stage: Optional[str] = None): """Set up all components of the SCOTCH SDE model.""" if self.is_setup: return # Already setup if stage not in {TrainerFn.TESTING, TrainerFn.FITTING}: raise ValueError(f"Model can only be setup during the {TrainerFn.FITTING} and {TrainerFn.TESTING} stages.") # Replaces adjacency distribution in DECIModule, necessary to encode 
directed graph (with self-loop) distrn. variable_group_shapes = self.trainer.datamodule.variable_shapes # type: ignore self.num_nodes = len(variable_group_shapes) self.adjacency_dist_module = BernoulliDigraphDistributionModule(self.num_nodes) self.graph_prior: GibbsDAGPrior = GibbsDAGPrior( num_nodes=self.num_nodes, sparsity_lambda=self.prior_sparsity_lambda ) self.observed_size, _ = shapes_to_slices(variable_group_shapes) self.tensor_to_td = TensorToTensorDictTransform(variable_group_shapes) self.latent_size = self.observed_size # in the SCOTCH model, we use the same dimension for observed/latents if self.layer_norm: deci_drift = SCOTCHFunctionalRelationships( shapes=variable_group_shapes, embedding_size=self.embedding_size, out_dim_g=self.out_dim_g, norm_layer=nn.LayerNorm, res_connection=self.res_connections, ) deci_diffusion = SCOTCHFunctionalRelationships( shapes=variable_group_shapes, embedding_size=self.embedding_size, out_dim_g=self.out_dim_g, norm_layer=nn.LayerNorm, res_connection=self.res_connections, sigmoid_output=self.sigmoid_output, ) else: deci_drift = SCOTCHFunctionalRelationships( shapes=variable_group_shapes, embedding_size=self.embedding_size, out_dim_g=self.out_dim_g, res_connection=self.res_connections, ) deci_diffusion = SCOTCHFunctionalRelationships( shapes=variable_group_shapes, embedding_size=self.embedding_size, out_dim_g=self.out_dim_g, res_connection=self.res_connections, sigmoid_output=self.sigmoid_output, ) # SDE components self.qz0_mean_net = nn.Linear(self.context_size, self.latent_size) self.qz0_logstd_net = nn.Linear(self.context_size, self.latent_size) self.pz0_mean = nn.Parameter(torch.zeros(1, self.latent_size)) self.pz0_logstd = nn.Parameter(torch.zeros(1, self.latent_size)) self.prior_drift_fn = DECIEmbedNNCoefficient(deci_drift.nn) if self.deci_diffusion: self.diffusion_fn = DECIEmbedNNCoefficient( deci_diffusion.nn, add_self_connections=self.add_diffusion_self_connections ) else: self.diffusion_fn = 
NeuralDiffusionCoefficient(latent_size=self.latent_size, hidden_size=self.hidden_size) self.posterior_drift_fn = NeuralContextualDriftCoefficient( latent_size=self.latent_size, hidden_size=self.hidden_size, context_size=self.context_size ) self.trajectory_encoder = NeuralTrajectoryGraphEncoder( observed_size=self.observed_size, hidden_size=self.hidden_size, context_size=self.context_size ) self.ts = self.trainer.datamodule.ts # type: ignore self.is_setup = True def training_step(self, *args, **kwargs) -> STEP_OUTPUT: """Compute the loss function (ELBO) for SCOTCH for the batch; involves integrating an augmented SDE.""" _ = kwargs batch, *_ = args # TensorDict mapping variable name to Tensor of shape (batch_size, variable_group_size, num_time_points) batch = batch.apply(lambda t: t.to(torch.float32, non_blocking=True)) batch_unrolled = self.tensor_to_td.inv(batch) # Step 1: Sample a graph from variational posterior over graphs adjacency_distribution = self.adjacency_dist_module() sampled_graphs = adjacency_distribution.relaxed_sample( torch.Size([batch_unrolled.shape[0]]), temperature=self.gumbel_temp ) # hard gumbel-softmax samples, shape (batch_size, num_nodes, num_nodes) # Step 2: For each observed trajectory, compute context for the variational posterior (over latent trajectories) # Context vectors are Tensors of shape (batch_size, num_time_points, context_size) context_vectors = self.trajectory_encoder( torch.flip(batch_unrolled, dims=(1,)), sampled_graphs ) # feed into encoder with reversed time context_vectors = torch.flip(context_vectors, dims=(1,)) # unflip time dimension ts_context_vectors = (self.ts, context_vectors) # Step 3: Sample initial latent state qz0_mean, qz0_logstd = self.qz0_mean_net(context_vectors[:, 0, :]), self.qz0_logstd_net( context_vectors[:, 0, :] ) qz0 = torch.distributions.Normal(loc=qz0_mean, scale=qz0_logstd.exp()) z0 = qz0.rsample() # shape (batch_size, latent_size) # Step 4: Compute loss components # Compute KL (t = 0) pz0 = 
torch.distributions.Normal(loc=self.pz0_mean, scale=self.pz0_logstd.exp()) logqp0 = torch.distributions.kl_divergence(qz0, pz0).sum(dim=1) # KL (t = 0) # Compute KL (path) and log-likelihood of observed trajectories aug_z0 = nn.functional.pad(z0, (0, 1), value=0.0) aug_sde = AugmentedSCOTCHSDE( posterior_drift_net=self.posterior_drift_fn, diffusion_net=self.diffusion_fn, prior_drift_net=self.prior_drift_fn, ts_context_vectors=ts_context_vectors, graphs=sampled_graphs, ) # aug_zs_t_first has shape (num_time_points, batch_size, latent_size + 1) if self.use_adjoint: # Adjoint method requires parameters to be explicitly passed in. aug_zs_t_first = torchsde.sdeint_adjoint( aug_sde, aug_z0, self.ts, adjoint_params=self.parameters(), dt=self.dt, method=self.sde_method, names={"drift": "f", "diffusion": "g"}, ) else: aug_zs_t_first = torchsde.sdeint( aug_sde, aug_z0, self.ts, dt=self.dt, method=self.sde_method, names={"drift": "f", "diffusion": "g"} ) aug_zs = swap_t_and_batch_dimensions(aug_zs_t_first) zs, logqp_path = aug_zs[:, :, :-1], aug_zs[:, -1, -1] if isinstance(self.noise_scale, Tensor): reshaped_noise_scale = self.noise_scale.repeat(zs.shape[0], zs.shape[1], 1) xs_dist = torch.distributions.Laplace(loc=zs, scale=reshaped_noise_scale) else: xs_dist = torch.distributions.Laplace(loc=zs, scale=self.noise_scale) log_pxs_tensor = xs_dist.log_prob(batch_unrolled) log_pxs = log_pxs_tensor.sum() # sum over batch, time points, and variables logqp = (logqp0 + logqp_path).sum(dim=0) # Compute expected log-prior and entropy terms for posterior graph distribution log_graph_prior = ( self.graph_prior.log_prob(sampled_graphs) / sampled_graphs.shape[0] ) # expected log-prior graph probability under posterior adjacency_distribution graph_entropy = adjacency_distribution.entropy() # Compute overall loss nll = -log_pxs + logqp * self.kl_scheduler.val - log_graph_prior - graph_entropy step_output = { "loss": nll, "log_pxs": log_pxs, "logqp": logqp, "log_graph_prior": 
log_graph_prior, "graph_entropy": graph_entropy, } if self.record_graph_logits: for i in range(self.num_nodes): for j in range(self.num_nodes): step_output[f"graph_logits_{i}_{j}"] = adjacency_distribution.logits[i, j] self.log_dict(step_output, prog_bar=True) self.kl_scheduler.step() return step_output def validation_step(self, *args, **kwargs): """Compute validation metrics: test_mse, tpr (true positive rate), fdr (false discovery rate).""" _ = kwargs batch, *_ = args # TensorDict mapping variable name to Tensor of shape (batch_size, variable_group_size, num_time_points) batch = batch.apply(lambda t: t.to(torch.float32, non_blocking=True)) batch_unrolled = self.tensor_to_td.inv(batch) # Extract fixed brownian motion and true graph from the data module bm_validation = self.trainer.datamodule.bm_validation true_graph = self.trainer.datamodule.true_graph # Step 1: Sample a graph from variational posterior over graphs adjacency_distribution = self.adjacency_dist_module() sampled_graphs = adjacency_distribution.sample( torch.Size([batch_unrolled.shape[0]]) ) # samples (w/o gumbel-softmax), shape (batch_size, num_nodes, num_nodes) if self.ignore_self_connections: # remove self-connections for evaluation for i, _ in enumerate(sampled_graphs): sampled_graphs[i] = sampled_graphs[i] - torch.diag(torch.diag(sampled_graphs[i])) # Step 2: Compute metrics logits = adjacency_distribution.logits.cpu().numpy() if self.ignore_self_connections: np.fill_diagonal(logits, -100000) # print("Logits: ", logits) # print("True graph: ", true_graph) if self.compute_auroc: auroc = roc_auc_score(y_true=true_graph.flatten().cpu().numpy(), y_score=logits.flatten()) print("AUROC: ", auroc) conf_matrix = confusion_matrix_batched(true_graph, sampled_graphs) tpr = true_positive_rate(conf_matrix[1, 1], conf_matrix[1, 0]) fdr = false_discovery_rate(conf_matrix[1, 1], conf_matrix[0, 1]) f1 = f1_score(conf_matrix[1, 1], conf_matrix[0, 1], conf_matrix[1, 0]) step_output = {"tpr": tpr, "fdr": fdr, "f1": 
f1} if self.compute_auroc: step_output["auroc"] = auroc # If brownian motions provided for validation trajectories. if bm_validation is not None: # Initial latent states z0 = batch_unrolled[:, 0, :] prior_sde = SCOTCHPriorSDE( prior_drift_net=self.prior_drift_fn, diffusion_net=self.diffusion_fn, graphs=sampled_graphs ) # Compute predicted trajectory, given prior SDE, initial latent state, and fixed brownian motion if self.use_adjoint: # Adjoint method requires parameters to be explicitly passed in. pred_zs_t_first = torchsde.sdeint_adjoint( prior_sde, z0, self.ts, adjoint_params=self.parameters(), dt=self.dt, bm=bm_validation, method=self.sde_method, names={"drift": "f", "diffusion": "g"}, ) else: pred_zs_t_first = torchsde.sdeint( prior_sde, z0, self.ts, dt=self.dt, bm=bm_validation, method=self.sde_method, names={"drift": "f", "diffusion": "g"}, ) pred_zs = swap_t_and_batch_dimensions(pred_zs_t_first) step_output["test_mse"] = ((pred_zs[:, 1:, :] - batch_unrolled[:, 1:, :]) ** 2).mean() self.log_dict(step_output) return step_output def configure_optimizers(self): modules = { "graph": self.adjacency_dist_module, "qz0_mean_net": self.qz0_mean_net, "qz0_logstd_net": self.qz0_logstd_net, "prior_drift_fn": self.prior_drift_fn, "diffusion_fn": self.diffusion_fn, "posterior_drift_fn": self.posterior_drift_fn, "trajectory_encoder": self.trajectory_encoder, } other_parameters = { "pz0_mean": self.pz0_mean, "pz0_logstd": self.pz0_logstd, } parameter_list = [ {"params": module.parameters(), "lr": self.learning_rates[name], "name": name} for name, module in modules.items() ] + [ {"params": [parameter], "lr": self.learning_rates[name], "name": name} for name, parameter in other_parameters.items() ] optimizer = torch.optim.Adam(parameter_list) return { "optimizer": optimizer, "lr_scheduler": torch.optim.lr_scheduler.LambdaLR( optimizer, lambda epoch: min(1, epoch / self.lr_warmup_iters) ), } def sample(self, batch_size, ts, bm=None, z0=None): adjacency_distribution = 
self.adjacency_dist_module() # self.sem_module.adjacency_module() # hard sample sampled_graphs = adjacency_distribution.sample(torch.Size([batch_size])) # Sample initial positions if z0 is None: eps = torch.randn(size=(batch_size, *self.pz0_mean.shape[1:]), device=self.pz0_mean.device) z0 = self.pz0_mean + self.pz0_logstd.exp() * eps sde = SCOTCHPriorSDE(self.prior_drift_fn, self.diffusion_fn, sampled_graphs) return sde.sample(z0, ts, bm, dt=self.dt)
causica/research_experiments/scotch/src/scotch/latent_learning/scotch_module.py/0
{ "file_path": "causica/research_experiments/scotch/src/scotch/latent_learning/scotch_module.py", "repo_id": "causica", "token_count": 9229 }
464
import abc from typing import Mapping import torch import torch.distributions as td from causica.data_generation.samplers.sampler import Sampler from causica.distributions import JointNoiseModule from causica.distributions.noise import NoiseModule, UnivariateNormalNoiseModule from causica.distributions.noise.bernoulli import BernoulliNoiseModule from causica.distributions.noise.categorical import CategoricalNoiseModule from causica.distributions.noise.univariate_cauchy import UnivariateCauchyNoiseModule from causica.distributions.noise.univariate_laplace import UnivariateLaplaceNoiseModule class NoiseModuleSampler(Sampler[NoiseModule]): """ An interface of a univariate noise sampler """ @abc.abstractmethod def sample( self, ) -> NoiseModule: """Sample a sample type with given shape""" class JointNoiseModuleSampler(NoiseModuleSampler): """Sampler for JointNoiseModule, given shapes and types of different variables""" def __init__( self, noise_dist_samplers: Mapping[str, NoiseModuleSampler], ): super().__init__() self.noise_dist_samplers = noise_dist_samplers def sample(self) -> JointNoiseModule: noise_modules = {} for key, noise_sampler in self.noise_dist_samplers.items(): noise_modules[key] = noise_sampler.sample() return JointNoiseModule(independent_noise_modules=noise_modules) class UnivariateNormalNoiseModuleSampler(NoiseModuleSampler): """Sample a UnivariateNormalNoiseModule, with standard deviation given by a distribution.""" def __init__(self, std_dist: td.Distribution, dim: int = 1): super().__init__() self.std_dist = std_dist self.dim = dim def sample( self, ): return UnivariateNormalNoiseModule(dim=self.dim, init_log_scale=torch.log(self.std_dist.sample()).item()) class UnivariateLaplaceNoiseModuleSampler(NoiseModuleSampler): """Sample a UnivariateLaplaceNoiseModule, with standard deviation given by a distribution.""" def __init__(self, std_dist: td.Distribution, dim: int = 1): super().__init__() self.std_dist = std_dist self.dim = dim def sample( self, ): 
log_std_sampled = torch.log(self.std_dist.sample()) return UnivariateLaplaceNoiseModule(dim=self.dim, init_log_scale=log_std_sampled) class UnivariateCauchyNoiseModuleSampler(NoiseModuleSampler): """Sample a UnivariateLaplaceNoiseModule, with standard deviation given by a distribution.""" def __init__(self, std_dist: td.Distribution, dim: int = 1): super().__init__() self.std_dist = std_dist self.dim = dim def sample( self, ): log_std_sampled = torch.log(self.std_dist.sample()) return UnivariateCauchyNoiseModule(dim=self.dim, init_log_scale=log_std_sampled) class BernoulliNoiseModuleSampler(NoiseModuleSampler): """Sample a BernoulliNoiseModule, with base_logits given by a distribution.""" def __init__(self, base_logits_dist: td.Distribution, dim: int = 1): super().__init__() self.base_logits_dist = base_logits_dist self.dim = dim def sample( self, ) -> NoiseModule: base_logits = self.base_logits_dist.sample() return BernoulliNoiseModule(dim=self.dim, init_base_logits=base_logits) class CategoricalNoiseModuleSampler(NoiseModuleSampler): """Sample a CategoricalNoiseModule, with num_classes classes. This does not actually sample but returns the noise.""" def __init__(self, base_logits_dist: td.Distribution | None, num_classes: int = 2): super().__init__() assert num_classes >= 2 self.num_classes = num_classes self.base_logits_dist = base_logits_dist def sample( self, ) -> NoiseModule: init_base_logits = self.base_logits_dist.sample() if self.base_logits_dist else None return CategoricalNoiseModule(num_classes=self.num_classes, init_base_logits=init_base_logits)
causica/src/causica/data_generation/samplers/noise_dist_sampler.py/0
{ "file_path": "causica/src/causica/data_generation/samplers/noise_dist_sampler.py", "repo_id": "causica", "token_count": 1534 }
465
from causica.distributions.adjacency.adjacency_distributions import AdjacencyDistribution from causica.distributions.adjacency.constrained_adjacency_distributions import ( ConstrainedAdjacency, ConstrainedAdjacencyDistribution, ) from causica.distributions.adjacency.directed_acyclic import ErdosRenyiDAGDistribution from causica.distributions.adjacency.enco import ENCOAdjacencyDistribution, ENCOAdjacencyDistributionModule from causica.distributions.adjacency.fixed_adjacency_distribution import FixedAdjacencyDistribution from causica.distributions.adjacency.gibbs_dag_prior import ExpertGraphContainer, GibbsDAGPrior from causica.distributions.adjacency.three_way import ThreeWayAdjacencyDistribution
causica/src/causica/distributions/adjacency/__init__.py/0
{ "file_path": "causica/src/causica/distributions/adjacency/__init__.py", "repo_id": "causica", "token_count": 222 }
466
""" Code containing the original rational quadratic splines implementation here: https://github.com/bayesiains/nsf at file path: nde/transforms/splines/rational_quadratic.py This is a copy-paste with tiny modifications to fit it all in one file It has an MIT license """ import numpy as np import torch from torch.nn import functional as F DEFAULT_MIN_BIN_WIDTH = 1e-3 DEFAULT_MIN_BIN_HEIGHT = 1e-3 DEFAULT_MIN_DERIVATIVE = 1e-3 def searchsorted(bin_locations, inputs, eps=1e-6): bin_locations[..., -1] += eps return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 def unconstrained_rational_quadratic_spline( inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, tails="linear", tail_bound=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE, ): inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) outside_interval_mask = ~inside_interval_mask outputs = torch.zeros_like(inputs) logabsdet = torch.zeros_like(inputs) if tails == "linear": unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) constant = np.log(np.exp(1 - min_derivative) - 1) unnormalized_derivatives[..., 0] = constant unnormalized_derivatives[..., -1] = constant outputs[outside_interval_mask] = inputs[outside_interval_mask] logabsdet[outside_interval_mask] = 0 else: raise RuntimeError(f"{tails} tails are not implemented.") if inside_interval_mask.any(): outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( inputs=inputs[inside_interval_mask], unnormalized_widths=unnormalized_widths[inside_interval_mask, :], unnormalized_heights=unnormalized_heights[inside_interval_mask, :], unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], inverse=inverse, left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, min_bin_width=min_bin_width, min_bin_height=min_bin_height, min_derivative=min_derivative, ) 
return outputs, logabsdet def rational_quadratic_spline( inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, left=0.0, right=1.0, bottom=0.0, top=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE, ): if torch.min(inputs) < left or torch.max(inputs) > right: raise ValueError("Input Outside Domain") num_bins = unnormalized_widths.shape[-1] if min_bin_width * num_bins > 1.0: raise ValueError("Minimal bin width too large for the number of bins") if min_bin_height * num_bins > 1.0: raise ValueError("Minimal bin height too large for the number of bins") widths = F.softmax(unnormalized_widths, dim=-1) widths = min_bin_width + (1 - min_bin_width * num_bins) * widths cumwidths = torch.cumsum(widths, dim=-1) cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) cumwidths = (right - left) * cumwidths + left cumwidths[..., 0] = left cumwidths[..., -1] = right widths = cumwidths[..., 1:] - cumwidths[..., :-1] derivatives = min_derivative + F.softplus(unnormalized_derivatives) heights = F.softmax(unnormalized_heights, dim=-1) heights = min_bin_height + (1 - min_bin_height * num_bins) * heights cumheights = torch.cumsum(heights, dim=-1) cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) cumheights = (top - bottom) * cumheights + bottom cumheights[..., 0] = bottom cumheights[..., -1] = top heights = cumheights[..., 1:] - cumheights[..., :-1] if inverse: bin_idx = searchsorted(cumheights, inputs)[..., None] else: bin_idx = searchsorted(cumwidths, inputs)[..., None] input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] input_bin_widths = widths.gather(-1, bin_idx)[..., 0] input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] delta = heights / widths input_delta = delta.gather(-1, bin_idx)[..., 0] input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] 
input_heights = heights.gather(-1, bin_idx)[..., 0] if inverse: a = (inputs - input_cumheights) * ( input_derivatives + input_derivatives_plus_one - 2 * input_delta ) + input_heights * (input_delta - input_derivatives) b = input_heights * input_derivatives - (inputs - input_cumheights) * ( input_derivatives + input_derivatives_plus_one - 2 * input_delta ) c = -input_delta * (inputs - input_cumheights) discriminant = b.pow(2) - 4 * a * c assert (discriminant >= 0).all() root = (2 * c) / (-b - torch.sqrt(discriminant)) outputs = root * input_bin_widths + input_cumwidths theta_one_minus_theta = root * (1 - root) denominator = input_delta + ( (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta ) derivative_numerator = input_delta.pow(2) * ( input_derivatives_plus_one * root.pow(2) + 2 * input_delta * theta_one_minus_theta + input_derivatives * (1 - root).pow(2) ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) return outputs, -logabsdet theta = (inputs - input_cumwidths) / input_bin_widths theta_one_minus_theta = theta * (1 - theta) numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta) denominator = input_delta + ( (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta ) outputs = input_cumheights + numerator / denominator derivative_numerator = input_delta.pow(2) * ( input_derivatives_plus_one * theta.pow(2) + 2 * input_delta * theta_one_minus_theta + input_derivatives * (1 - theta).pow(2) ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) return outputs, logabsdet
causica/src/causica/distributions/noise/spline/bayesiains_nsf_rqs.py/0
{ "file_path": "causica/src/causica/distributions/noise/spline/bayesiains_nsf_rqs.py", "repo_id": "causica", "token_count": 2836 }
467
import time from queue import Queue import mlflow class MLFlowBatch: """ A class for collecting MLFlow logs and submitting them as a batch to speed up logging. It will automatically batch all logs, once the batch size is reached. Accumulated metrics can be manually logged by calling flush. """ def __init__(self, batch_size: int) -> None: self._queue: Queue = Queue(maxsize=batch_size) # mypy requires explicit type annotation here. self._client = mlflow.tracking.MlflowClient() active_run = mlflow.active_run() if active_run is None: raise ValueError("No active ML flow run.") self._run_id = active_run.info.run_id def log_metric(self, key: str, value: float, step: int = 0): """Stores a metric in the log batch and pushes if the batch is full.""" metric = mlflow.entities.Metric(key=key, value=float(value), timestamp=int(time.time()), step=step) self._queue.put(metric) if self._queue.full(): self.flush() def flush(self): """Manually push the accumulated logs.""" metrics = [] while not self._queue.empty(): metrics.append(self._queue.get()) self._client.log_batch(run_id=self._run_id, metrics=metrics)
causica/src/causica/mlflow_helpers.py/0
{ "file_path": "causica/src/causica/mlflow_helpers.py", "repo_id": "causica", "token_count": 496 }
468
import numpy as np import pytest import torch import torch.testing from causica.datasets.loaded_expert_graph_container import LoadedExpertGraphContainer @pytest.mark.parametrize("file_type", ["npy", "csv"]) def test_loaded_expert_graph_container(tmp_path, file_type: str): """Test the loaded expert graph.""" adj_matrix = torch.triu(torch.ones(10, 10, dtype=torch.bool), diagonal=1) # Add some nans adj_matrix = adj_matrix.masked_fill(torch.rand(size=adj_matrix.shape) < 0.5, np.nan) mask = ~torch.isnan(adj_matrix) if file_type == "npy": adj_matrix_path = tmp_path / "expert_graph.npy" with adj_matrix_path.open("wb") as f: np.save(f, adj_matrix.numpy()) elif file_type == "csv": adj_matrix_path = tmp_path / "expert_graph.csv" with adj_matrix_path.open("w") as f: np.savetxt(f, adj_matrix.numpy(), delimiter=",") else: raise ValueError(f"Unknown file type {file_type}") expert_graph_container = LoadedExpertGraphContainer(str(adj_matrix_path), confidence=0.9, scale=1.0) assert torch.equal(torch.tensor(adj_matrix, dtype=torch.int64), expert_graph_container.dag) assert torch.equal(mask, expert_graph_container.mask)
causica/test/datasets/test_loaded_expert_graph_container.py/0
{ "file_path": "causica/test/datasets/test_loaded_expert_graph_container.py", "repo_id": "causica", "token_count": 514 }
469
import math import pytest import torch from causica.distributions.noise import CategoricalNoise def test_init(): base_logits = torch.Tensor([0.3, 0.2, 0.1]) # no batch x_hat = torch.Tensor([0.3, 0.2, 0.1]) noise_model = CategoricalNoise(x_hat, base_logits) assert noise_model.logits.shape == torch.Size([3]) logits = base_logits + x_hat assert torch.equal(noise_model.logits, logits - logits.logsumexp(dim=-1, keepdim=True)) # batch size 1 x_hat = torch.Tensor([[0.3, 0.2, 0.1], [0.4, 0.5, 0.6]]) noise_model = CategoricalNoise(x_hat, base_logits) assert noise_model.logits.shape == torch.Size([2, 3]) logits = base_logits + x_hat assert torch.equal(noise_model.logits, logits - logits.logsumexp(dim=-1, keepdim=True)) @pytest.mark.parametrize( "base_logits,x_hat", [ (0.0, torch.tensor([0.0, 0.0, 0.0])), (1.0, torch.tensor([0.0, 0.0, 0.0])), (0.0, torch.tensor([0.1, 0.5, 1.0])), (1.0, torch.tensor([1.0, 2.0, 3.0])), (1.0, torch.tensor([-1.0, 2.0, -1.0])), ], ) def test_noise_reconstruction(base_logits, x_hat): n = 5000 x_hat = x_hat.repeat(n, 1) noise_model = CategoricalNoise(delta_logits=x_hat, base_logits=base_logits) # generate samples samples = noise_model.sample() # [5000, 3] # test samples noise = noise_model.sample_to_noise(samples) # get posterior sample again post_sample = noise_model.noise_to_sample(noise) assert torch.allclose(samples, post_sample) # test noise post_noise = noise_model.sample_to_noise(post_sample) # This is 8 sigma of a Logistic variable eight_sigma = 8 * math.pi / math.sqrt(3 * n) assert abs(noise.mean() - post_noise.mean()) < eight_sigma
causica/test/distributions/noise/test_categorical.py/0
{ "file_path": "causica/test/distributions/noise/test_categorical.py", "repo_id": "causica", "token_count": 803 }
470
import os import tempfile import pytest import torch from causica.datasets.causica_dataset_format import CAUSICA_DATASETS_PATH, DataEnum, load_data, save_data, save_dataset from causica.datasets.tensordict_utils import tensordict_shapes def _load_and_test_dataset(root_path: str): variables_metadata = load_data(root_path, DataEnum.VARIABLES_JSON) train_data = load_data(root_path, DataEnum.TRAIN, variables_metadata) test_data = load_data(root_path, DataEnum.TEST, variables_metadata) assert tensordict_shapes(train_data) == tensordict_shapes(test_data) groups = set(train_data.keys()) interventions = load_data(root_path, DataEnum.INTERVENTIONS, variables_metadata) for (intervention_a, intervention_b, _) in interventions: int_groups_a = set(intervention_a.intervention_values.keys()) | intervention_a.sampled_nodes int_groups_b = set(intervention_b.intervention_values.keys()) | intervention_b.sampled_nodes assert groups == int_groups_a assert groups == int_groups_b assert tensordict_shapes(intervention_a.intervention_data) == tensordict_shapes( intervention_b.intervention_data ) adj_mat = load_data(root_path, DataEnum.TRUE_ADJACENCY) num_nodes = len(train_data.keys()) assert adj_mat.shape == (num_nodes, num_nodes) return variables_metadata, train_data, test_data, adj_mat, interventions def load_and_test_counterfactuals(root_path): variables_metadata = load_data(root_path, DataEnum.VARIABLES_JSON) # not all counterfactuals exist counterfactuals = load_data(root_path, DataEnum.COUNTERFACTUALS, variables_metadata) for (intervention_a, intervention_b, _) in counterfactuals: int_groups_a = set(intervention_a.intervention_values.keys()) | intervention_a.sampled_nodes int_groups_b = set(intervention_b.intervention_values.keys()) | intervention_b.sampled_nodes assert int_groups_a == int_groups_b # all nodes must have the same dimensions in both factual and counterfactual data assert tensordict_shapes(intervention_a.counterfactual_data) == tensordict_shapes( 
intervention_b.counterfactual_data ) assert tensordict_shapes(intervention_a.counterfactual_data) == tensordict_shapes(intervention_a.factual_data) assert tensordict_shapes(intervention_a.factual_data) == tensordict_shapes(intervention_b.factual_data) # there must be the same numbers of factual and counterfactual in each dataset assert intervention_a.factual_data.batch_size == intervention_a.counterfactual_data.batch_size assert intervention_b.factual_data.batch_size == intervention_b.counterfactual_data.batch_size return variables_metadata, counterfactuals @pytest.mark.parametrize("dataset", ["csuite_weak_arrows", "csuite_linexp_2", "csuite_cts_to_cat"]) def test_load_save_csuite(dataset): """Test that we can load a csuite dataset, save it, and load it again""" root_path = os.path.join(CAUSICA_DATASETS_PATH, dataset) variables_metadata, train_data, test_data, adj_mat, interventions = _load_and_test_dataset(root_path) with tempfile.TemporaryDirectory() as tmpdir: save_dataset( tmpdir, variables_metadata, adj_mat, train_data, test_data, interventions=interventions, overwrite=True ) variables_metadata_2, train_data_2, test_data_2, adj_mat_2, interventions_2 = _load_and_test_dataset(tmpdir) assert variables_metadata == variables_metadata_2 torch.testing.assert_allclose(adj_mat, adj_mat_2) torch.testing.assert_allclose(train_data, train_data_2) assert tensordict_shapes(train_data) == tensordict_shapes(train_data_2) assert tensordict_shapes(test_data) == tensordict_shapes(test_data_2) assert len(interventions) == len(interventions_2) for intervention_1, intervention_2 in zip(interventions, interventions_2): for field in ["intervention_data", "intervention_values", "condition_values"]: torch.testing.assert_allclose(getattr(intervention_1[0], field), getattr(intervention_2[0], field)) torch.testing.assert_allclose(getattr(intervention_1[1], field), getattr(intervention_2[1], field)) assert intervention_1[2] == intervention_2[2] def test_load_save_counterfactuals(): 
dataset = "csuite_linexp_2" root_path = os.path.join(CAUSICA_DATASETS_PATH, dataset) variables_metadata, counterfactuals = load_and_test_counterfactuals(root_path) with tempfile.TemporaryDirectory() as tmpdir: save_data(tmpdir, variables_metadata, DataEnum.VARIABLES_JSON, variables_metadata) save_data(tmpdir, counterfactuals, DataEnum.COUNTERFACTUALS, variables_metadata) variables_metadata_2, counterfactuals_2 = load_and_test_counterfactuals(tmpdir) assert variables_metadata == variables_metadata_2 assert len(counterfactuals) == len(counterfactuals_2) for cf_1, cf_2 in zip(counterfactuals, counterfactuals_2): for field in ["counterfactual_data", "intervention_values", "factual_data"]: torch.testing.assert_allclose(getattr(cf_1[0], field), getattr(cf_2[0], field)) torch.testing.assert_allclose(getattr(cf_1[1], field), getattr(cf_2[1], field)) assert cf_1[2] == cf_2[2]
causica/test/integration/test_save_load_csuite.py/0
{ "file_path": "causica/test/integration/test_save_load_csuite.py", "repo_id": "causica", "token_count": 2024 }
471
# Clifford Layers [![Documentation](https://img.shields.io/badge/docs-passing-brightgreen)](https://microsoft.github.io/cliffordlayers) For details about usage please see [documentation](https://microsoft.github.io/cliffordlayers). If you have any questions or suggestions please open a [discussion](https://github.com/microsoft/cliffordlayers/discussions). If you notice a bug, please open an [issue](https://github.com/microsoft/cliffordlayers/issues). ## Installation ```bash pip install cliffordlayers ``` ## Citation If you find our work and/or our code useful, please cite us via: ```bibtex @article{brandstetter2022clifford, title={Clifford Neural Layers for PDE Modeling}, author={Brandstetter, Johannes and Berg, Rianne van den and Welling, Max and Gupta, Jayesh K}, journal={arXiv preprint arXiv:2209.04934}, year={2022} } @article{ruhe2023geometric, title={Geometric Clifford Algebra Networks}, author={Ruhe, David and Gupta, Jayesh K and de Keninck, Steven and Welling, Max and Brandstetter, Johannes}, journal={arXiv preprint arXiv:2302.06594}, year={2023} } ``` ## Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [[email protected]](mailto:[email protected]) with any additional questions or comments. 
## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies.
cliffordlayers/README.md/0
{ "file_path": "cliffordlayers/README.md", "repo_id": "cliffordlayers", "token_count": 697 }
472
name: cliffordlayers channels: - pytorch - anaconda - defaults dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - asciitree=0.3.3=py_2 - blas=1.0=mkl - bokeh=2.4.3=py38h06a4308_0 - bottleneck=1.3.5=py38h7deecbd_0 - brotlipy=0.7.0=py38h27cfd23_1003 - bzip2=1.0.8=h7b6447c_0 - ca-certificates=2022.07.19=h06a4308_0 - certifi=2022.6.15=py38h06a4308_0 - cffi=1.15.1=py38h74dc2b5_0 - charset-normalizer=2.0.4=pyhd3eb1b0_0 - click=8.0.4=py38h06a4308_0 - cloudpickle=2.0.0=pyhd3eb1b0_0 - cryptography=37.0.1=py38h9ce1e76_0 - cudatoolkit=11.3.1=h2bc3f7f_2 - cytoolz=0.11.0=py38h7b6447c_0 - dask=2022.2.1=pyhd3eb1b0_0 - dask-core=2022.2.1=pyhd3eb1b0_0 - distributed=2022.2.1=pyhd3eb1b0_0 - fasteners=0.16.3=pyhd3eb1b0_0 - ffmpeg=4.3=hf484d3e_0 - freetype=2.11.0=h70c0345_0 - fsspec=2022.3.0=py38h06a4308_0 - giflib=5.2.1=h7b6447c_0 - gmp=6.2.1=h295c915_3 - gnutls=3.6.15=he1e5248_0 - h5py=3.7.0=py38h737f45e_0 - hdf5=1.10.6=h3ffc7dd_1 - heapdict=1.0.1=pyhd3eb1b0_0 - idna=3.3=pyhd3eb1b0_0 - intel-openmp=2021.4.0=h06a4308_3561 - jinja2=3.0.3=pyhd3eb1b0_0 - joblib=1.1.0=pyhd3eb1b0_0 - jpeg=9e=h7f8727e_0 - lame=3.100=h7b6447c_0 - lcms2=2.12=h3be6417_0 - ld_impl_linux-64=2.38=h1181459_1 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=11.2.0=h00389a5_1 - libgfortran5=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libiconv=1.16=h7f8727e_2 - libidn2=2.3.2=h7f8727e_0 - libpng=1.6.37=hbc83047_0 - libstdcxx-ng=11.2.0=h1234567_1 - libtasn1=4.16.0=h27cfd23_0 - libtiff=4.2.0=h2818925_1 - libunistring=0.9.10=h27cfd23_0 - libwebp=1.2.2=h55f646e_0 - libwebp-base=1.2.2=h7f8727e_0 - locket=1.0.0=py38h06a4308_0 - lz4-c=1.9.3=h295c915_1 - markupsafe=2.1.1=py38h7f8727e_0 - mkl=2021.4.0=h06a4308_640 - mkl-service=2.4.0=py38h7f8727e_0 - mkl_fft=1.3.1=py38hd3c417c_0 - mkl_random=1.2.2=py38h51133e4_0 - msgpack-python=1.0.3=py38hd09550d_0 - ncurses=6.3=h5eee18b_3 - nettle=3.7.3=hbbd107a_1 - numcodecs=0.9.1=py38h295c915_0 - numexpr=2.8.3=py38h807cd23_0 - 
numpy=1.23.1=py38h6c91a56_0 - numpy-base=1.23.1=py38ha15fc14_0 - openh264=2.1.1=h4ff587b_0 - openssl=1.1.1q=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pandas=1.4.3=py38h6a678d5_0 - partd=1.2.0=pyhd3eb1b0_1 - pillow=9.2.0=py38hace64e9_1 - pip=22.2.2=py38h06a4308_0 - psutil=5.9.0=py38h5eee18b_0 - pycparser=2.21=pyhd3eb1b0_0 - pyopenssl=22.0.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pysocks=1.7.1=py38h06a4308_0 - python=3.8.13=h12debd9_0 - python-dateutil=2.8.2=pyhd3eb1b0_0 - pytorch=1.12.1=py3.8_cuda11.3_cudnn8.3.2_0 - pytorch-mutex=1.0=cuda - pytz=2022.1=py38h06a4308_0 - pyyaml=6.0=py38h7f8727e_1 - readline=8.1.2=h7f8727e_1 - requests=2.28.1=py38h06a4308_0 - setuptools=61.2.0=py38h06a4308_0 - six=1.16.0=pyhd3eb1b0_1 - sortedcontainers=2.4.0=pyhd3eb1b0_0 - sqlite=3.39.2=h5082296_0 - tblib=1.7.0=pyhd3eb1b0_0 - tk=8.6.12=h1ccaba5_0 - toolz=0.11.2=pyhd3eb1b0_0 - torchaudio=0.12.1=py38_cu113 - torchvision=0.13.1=py38_cu113 - typing-extensions=4.3.0=py38h06a4308_0 - typing_extensions=4.3.0=py38h06a4308_0 - urllib3=1.26.11=py38h06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xarray=0.20.1=pyhd3eb1b0_1 - xz=5.2.5=h7f8727e_1 - yaml=0.2.5=h7b6447c_0 - zarr=2.8.1=pyhd3eb1b0_0 - zict=2.1.0=py38h06a4308_0 - zlib=1.2.12=h7f8727e_2 - zstd=1.5.2=ha4553b6_0 - pip: - absl-py==1.3.0 - aiohttp==3.8.3 - aiosignal==1.2.0 - antlr4-python3-runtime==4.9.3 - anyio==3.6.1 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - asttokens==2.0.8 - async-timeout==4.0.2 - attrs==22.1.0 - backcall==0.2.0 - beautifulsoup4==4.11.1 - bleach==5.0.1 - brotli==1.0.9 - cachetools==5.2.0 - colorama==0.4.6 - commonmark==0.9.1 - contourpy==1.0.5 - cycler==0.11.0 - dash==2.6.1 - dash-core-components==2.0.0 - dash-html-components==2.0.0 - dash-table==5.0.0 - debugpy==1.6.3 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - exceptiongroup==1.0.4 - executing==1.1.1 - fastjsonschema==2.16.2 - fdtd==0.2.5 - flask==2.2.2 - flask-compress==1.13 - fonttools==4.37.4 - frozenlist==1.3.1 - ghp-import==2.1.0 - 
google-auth==2.12.0 - google-auth-oauthlib==0.4.6 - griffe==0.24.1 - grpcio==1.49.1 - hydra-core==1.2.0 - importlib-metadata==5.0.0 - importlib-resources==5.10.0 - iniconfig==1.1.1 - ipykernel==6.16.0 - ipython==8.5.0 - ipython-genutils==0.2.0 - ipywidgets==8.0.2 - itsdangerous==2.1.2 - jedi==0.18.1 - jsonargparse==4.15.1 - jsonschema==4.16.0 - jupyter==1.0.0 - jupyter-client==7.4.2 - jupyter-console==6.4.4 - jupyter-core==4.11.1 - jupyter-server==1.21.0 - jupyterlab-pygments==0.2.2 - jupyterlab-widgets==3.0.3 - kiwisolver==1.4.4 - markdown==3.3.7 - matplotlib==3.6.1 - matplotlib-inline==0.1.6 - mergedeep==1.3.4 - mistune==2.0.4 - mkdocs==1.4.2 - mkdocs-autorefs==0.4.1 - mkdocs-material==8.5.11 - mkdocs-material-extensions==1.1.1 - mkdocstrings==0.19.0 - mkdocstrings-python==0.8.2 - multidict==6.0.2 - nbclassic==0.4.5 - nbclient==0.7.0 - nbconvert==7.2.1 - nbformat==5.7.0 - nest-asyncio==1.5.6 - notebook==6.5.1 - notebook-shim==0.2.0 - oauthlib==3.2.1 - omegaconf==2.2.3 - pandocfilters==1.5.0 - parso==0.8.3 - pexpect==4.8.0 - phiflow==2.1.4 - pickleshare==0.7.5 - pkgutil-resolve-name==1.3.10 - plotly==5.10.0 - pluggy==1.0.0 - portalocker==2.5.1 - prometheus-client==0.15.0 - prompt-toolkit==3.0.31 - protobuf==3.19.6 - ptyprocess==0.7.0 - pure-eval==0.2.2 - pyasn1==0.4.8 - pyasn1-modules==0.2.8 - pydeprecate==0.3.2 - pygments==2.13.0 - pymdown-extensions==9.9 - pyrsistent==0.18.1 - pytest==7.2.0 - pytorch-lightning==1.6.3 - pyyaml-env-tag==0.1 - pyzmq==24.0.1 - qtconsole==5.3.2 - qtpy==2.2.1 - requests-oauthlib==1.3.1 - rich==12.5.1 - rsa==4.9 - scipy==1.9.2 - send2trash==1.8.0 - sniffio==1.3.0 - soupsieve==2.3.2.post1 - stack-data==0.5.1 - tenacity==8.1.0 - tensorboard==2.10.1 - tensorboard-data-server==0.6.1 - tensorboard-plugin-wit==1.8.1 - terminado==0.16.0 - tinycss2==1.2.1 - tomli==2.0.1 - torchdata==0.4.1 - torchmetrics==0.10.0 - tornado==6.2 - tqdm==4.64.1 - traitlets==5.4.0 - watchdog==2.1.9 - wcwidth==0.2.5 - webencodings==0.5.1 - websocket-client==1.4.1 - 
werkzeug==2.2.2 - widgetsnbextension==4.0.3 - yarl==1.8.1 - zipp==3.9.0
cliffordlayers/docker/environment.yml/0
{ "file_path": "cliffordlayers/docker/environment.yml", "repo_id": "cliffordlayers", "token_count": 4197 }
473
import torch from cliffordlayers.nn.modules.gcan import ( CliffordG3ConvTranspose2d, CliffordG3GroupNorm, CliffordG3Conv2d, CliffordG3LinearVSiLU, CliffordG3MeanVSiLU, CliffordG3SumVSiLU, PGAConjugateLinear, MultiVectorAct, ) from cliffordlayers.cliffordalgebra import CliffordAlgebra def test_g3convtranspose2d(): g3convtranspose2d = CliffordG3ConvTranspose2d(8, 8) x = torch.randn(4, 8, 32, 32, 3) y = g3convtranspose2d(x) assert y.shape == (4, 8, 32, 32, 3) def test_g3groupnorm(): g3groupnorm = CliffordG3GroupNorm(8, 8, 3) x = torch.randn(4, 8, 3) y = g3groupnorm(x) assert y.shape == (4, 8, 3) def test_g3vector_act(): g3vectorlinear = CliffordG3LinearVSiLU(8) g3vectorsum = CliffordG3SumVSiLU() g3vectormean = CliffordG3MeanVSiLU() x = torch.randn(4, 8, 32, 32, 3) y = g3vectorlinear(x) assert y.shape == (4, 8, 32, 32, 3) y = g3vectorsum(x) assert y.shape == (4, 8, 32, 32, 3) y = g3vectormean(x) assert y.shape == (4, 8, 32, 32, 3) def test_g3conv2d(): # We operate on the vectors of the G3 algebra. conv = CliffordG3Conv2d( in_channels=3, out_channels=4, kernel_size=3, stride=1, padding=1, dilation=1, bias=True, ) x = torch.randn(4, 3, 3, 3, 3) output = conv(x) assert output.shape == (4, 4, 3, 3, 3) def test_pga_conjugate_linear(): in_features = 8 out_features = 16 algebra = CliffordAlgebra([0, 1, 1, 1]) linear = PGAConjugateLinear( algebra=algebra, in_features=in_features, out_features=out_features, input_blades=(1, 2, 3), # (11, 12, 13) for points. ) vector = torch.randn(4, in_features, 3) output = linear(vector) assert output.shape == (4, out_features, 3) def test_multivectoract(): algebra = CliffordAlgebra([0, 1, 1, 1]) input = torch.randn(4, 3, 3) act = MultiVectorAct(channels=3, algebra=algebra, input_blades=(1, 2, 3)) output = act(input) assert output.shape == (4, 3, 3) if __name__ == "__main__": test_pga_conjugate_linear() test_multivectoract() test_g3conv2d() test_g3vector_act() test_g3groupnorm() test_g3convtranspose2d()
cliffordlayers/tests/test_gcan_layers.py/0
{ "file_path": "cliffordlayers/tests/test_gcan_layers.py", "repo_id": "cliffordlayers", "token_count": 1066 }
474
{ "values": [ { "recordId": "a1", "data": { "text": "But Google is starting from behind. The company made a late push into hardware, and Apple's Siri, available on iPhones, and Amazon's Alexa software, which runs on its Echo and Dot devices, have clear leads in consumer adoption.", "language": "en" } } ] }
cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/data/example_request.json/0
{ "file_path": "cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/data/example_request.json", "repo_id": "cookiecutter-spacy-fastapi", "token_count": 172 }
475
# Various settings to configure when deploying the framework. # Service Storage # We do not want to enable the data storage service because we do not want to manage that data storage. # Default is enabled. # Disable the data store for a simple production environment. REACT_APP_ENABLE_SERVICE_DATA_STORE=false # Online Safety # To comply with rules and laws regarding what can be shown, you can enable this setting to be very strict # and hide text and images that are user generated except for items you have explicitly allowed. # Configure in: client/src/safety/config.ts # Default is disabled. # Enable online safety for a simple production environment. REACT_APP_ENABLE_ONLINE_SAFETY=true
0xDeCA10B/demo/client/.env/0
{ "file_path": "0xDeCA10B/demo/client/.env", "repo_id": "0xDeCA10B", "token_count": 173 }
0
import Container from '@material-ui/core/Container' import Link from '@material-ui/core/Link' import { withStyles } from '@material-ui/core/styles' import Typography from '@material-ui/core/Typography' import React from 'react' import { version } from '../../package.json' import { OnlineSafetyValidator } from '../safety/validator' import { BASE_TITLE } from '../title' const styles = theme => ({ sectionTitle: { marginTop: theme.spacing(1), }, section: { textAlign: 'left', marginTop: theme.spacing(1), }, }) class About extends React.Component { validator = new OnlineSafetyValidator() componentDidMount() { document.title = `About - ${BASE_TITLE}` } render() { const name = "Sharing Updatable Models" const { classes } = this.props return (<Container maxWidth="lg"> <Typography variant="h4" component="h4"> {name} </Typography> <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#disclaimers' name='disclaimers'>Disclaimers</Link> </Typography> <Typography className={classes.section} component="p"> ⚠ WARNING When you upload a model or data to train a model, that data is most likely added to a version of a third party <Link href='https://ethereum.org/' target='_blank'>Ethereum</Link> blockchain network not controlled by Microsoft. Unless explicitly indicated, your data is not stored on Microsoft controlled machines. Your internet browser allows you to create transactions directly with a blockchain you have chosen through your browser or a browser extension like <Link href='https://metamask.io/' target='_blank'>MetaMask</Link>. Microsoft has no control over these transactions since the request is never sent to Microsoft machines before being sent from your browser directly to the blockchain network. </Typography> <Typography className={classes.section} component="p"> If you have not changed the default blockchain network in your browser's or extension's settings, then it is likely set to use the public Ethereum mainnet. 
Microsoft does not fully endorse nor support the use of the mainnet or any other third party network because all information in it is public and might be difficult to completely delete. </Typography> <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#project' name='project'>About This Project</Link> </Typography> <Typography className={classes.section} component="p"> The goal of this project is to promote sharing machine learning models at a greater scale. To achieve this, models are stored on a blockchain and so that people can update the models by providing their own data to smart contracts which train the model. Since this project could involve interacting with public blockchains, it is strongly encouraged that personal data is not used when interacting with models. This project is meant to be a proof of concept. For greater privacy and control over data, a private and permissioned chain can be used by trusted collaborators. An overview of the project can be found in our <Link href='https://aka.ms/0xDeCA10B-blog1' target="_blank">blog post</Link>. </Typography> {this.validator.isEnabled() && <div> <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#online-safety' name='online-safety'>Online Safety</Link> </Typography> <Typography className={classes.section} component="p"> Special precautions have been enabled to stop unvalidated text and images from showing in this platform. You may notice that model names, descriptions, data, classifications, or other values might be hidden. 
</Typography> </div>} <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#code-of-conduct' name='code-of-conduct'>Code of Conduct</Link> </Typography> <Typography className={classes.section} component="p"> The <Link href='https://go.microsoft.com/fwlink/?LinkID=246338' target='_blank'>Code of Conduct</Link> for Microsoft should be followed. </Typography> <Typography className={classes.section} component="p"> Usually the blockchain you will select is public, therefore all data uploaded for training is effectively public. We encourage you not to upload nor use data with personal information. In our example smart contracts, getting predictions from models when you give data should not save the data to a public blockchain. You can tell if an action you take is saving data beyond your control because your browser should pop-up a notification asking you to confirm the transaction using something like <Link href='https://metamask.io/' target='_blank' rel="noopener">MetaMask</Link>. </Typography> <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#learn-more' name='learn-more'>Learn More</Link> </Typography> <Typography className={classes.section} component="p"> The source code for this project can be found <Link href='https://aka.ms/0xDeCA10B' target="_blank">here</Link>. We also have a <Link href='https://aka.ms/0xDeCA10B-blog1' target="_blank">blog post</Link> explaining the purpose of this project. </Typography> <Typography className={classes.sectionTitle} variant="h5" component="h5"> <Link color='inherit' href='#version' name='version'>Version</Link> </Typography> <Typography className={classes.section} component="p"> {version} </Typography> </Container >) } } export default withStyles(styles)(About)
0xDeCA10B/demo/client/src/components/About.js/0
{ "file_path": "0xDeCA10B/demo/client/src/components/About.js", "repo_id": "0xDeCA10B", "token_count": 1796 }
1
pragma solidity ^0.6; import {Ownable} from "../ownership/Ownable.sol"; /** * Defines incentives for others to contribute "good" quality data. */ abstract contract IncentiveMechanism { struct AddressStats { uint128 numValid; uint128 numSubmitted; } mapping(address => AddressStats) public addressStats; /** * The total number of samples that have been submitted. */ uint public totalSubmitted = 0; /** * The total number of samples that have been determined to be good. */ uint public totalGoodDataCount = 0; // The following members are in chronologically increasing order of when they should occur. /** * Amount of time to wait to get a refund back. * Once this amount of time has passed, the entire deposit can be reclaimed. * Also once this amount of time has passed, the deposit (in full or in part) can be taken by others. */ uint32 public refundWaitTimeS; /** * Amount of time owner has to wait to take someone's entire remaining refund. * The purpose of this is to give the owner some incentive to deploy a model. * This must be greater than the required amount of time to wait for attempting a refund. * Contracts may want to enforce that this is much greater than the amount of time to wait for attempting a refund * to give even more time to get the deposit back and not let the owner take too much. */ uint32 public ownerClaimWaitTimeS; /** * Amount of time after which anyone can take someone's entire remaining refund. * Similar to `ownerClaimWaitTimeS` but it allows any address to claim funds for specific data. * The purpose of this is to help ensure that value does not get "stuck" in a contract. * This must be greater than the required amount of time to wait for attempting a refund. * Contracts may want to enforce that this is much greater than the amount of time to wait for attempting a refund * to give even more time to get the deposit back and not let others take too much. */ uint32 public anyAddressClaimWaitTimeS; // End claim time members. 
constructor( // Parameters in chronological order. uint32 _refundWaitTimeS, uint32 _ownerClaimWaitTimeS, uint32 _anyAddressClaimWaitTimeS ) public { refundWaitTimeS = _refundWaitTimeS; ownerClaimWaitTimeS = _ownerClaimWaitTimeS; anyAddressClaimWaitTimeS = _anyAddressClaimWaitTimeS; } /** * @return The current cost (in wei) to update a model with one sample of training data. */ function getNextAddDataCost() public virtual view returns (uint); /** * @param currentTimeS The current time in seconds since the epoch. * * @return The amount of wei required to add data at `currentTimeS`. */ function getNextAddDataCost(uint currentTimeS) public virtual view returns (uint); /** * @return The number of samples that have been determined to be good for `submitter`. */ function numValidForAddress(address submitter) public view returns (uint128) { return addressStats[submitter].numValid; } } /** * An `IncentiveMechanism` for data with 64-bit values. */ abstract contract IncentiveMechanism64 is Ownable, IncentiveMechanism { /** * This method allows contracts to change the required deposit depending on the specific data being added. * For example, one may want to enforce a higher deposit on outlier (different) data because it could be spam, * or they might want a lower deposit on it data because it could be important unique data. * * @param data A single sample of training data for the model. * @param classification The label for `data`. * @return The current cost to update a model with a specific sample of training data. */ function getNextAddDataCost(int64[] memory data, uint64 classification) public virtual view returns (uint); /** * Determine if the request to add data is acceptable. * * @param msgValue The value sent with the initial transaction to add data. * @param data A single sample of training data for the model. * @param classification The label for `data`. * @return cost The cost required to add new data. 
*/ function handleAddData(uint msgValue, int64[] memory data, uint64 classification) public virtual returns (uint cost); /** * Notify that a refund is being attempted. * * @param submitter The address of the one attempting a refund. * @param data The data for which to attempt a refund. * @param classification The label originally submitted with `data`. * @param addedTime The time when the data was added. * @param claimableAmount The amount that can be claimed for the refund. * @param claimedBySubmitter True if the data has already been claimed by `submitter`, otherwise false. * @param prediction The current prediction of the model for data. * @param numClaims The number of claims that have been made for the contribution before this request. * @return refundAmount The amount to refund to `submitter`. */ function handleRefund( address submitter, int64[] memory data, uint64 classification, uint addedTime, uint claimableAmount, bool claimedBySubmitter, uint64 prediction, uint numClaims) public virtual returns (uint refundAmount); /** * Notify that data is being reported as bad or old. * * @param reporter The address of the one reporting about the data. * @param data The data being reported. * @param classification The label originally submitted with `data`. * @param addedTime The time when the data was added. * @param originalAuthor The address that originally added the data. * @param initialDeposit The amount initially deposited when the data was added. * @param claimableAmount The amount of the deposit that can still be claimed. * @param claimedByReporter True if the data has already been claimed by `reporter`, otherwise false. * @param prediction The current prediction of the model for data. * @param numClaims The number of claims that have been made for the contribution before this request. * @return rewardAmount The amount to reward to `reporter`. 
*/ function handleReport( address reporter, int64[] memory data, uint64 classification, uint addedTime, address originalAuthor, uint initialDeposit, uint claimableAmount, bool claimedByReporter, uint64 prediction, uint numClaims) public virtual returns (uint rewardAmount); }
0xDeCA10B/demo/client/src/contracts/incentive/IncentiveMechanism.sol/0
{ "file_path": "0xDeCA10B/demo/client/src/contracts/incentive/IncentiveMechanism.sol", "repo_id": "0xDeCA10B", "token_count": 2108 }
2
const assert = require('assert') const tf = require('@tensorflow/tfjs-node') const { normalize1d, normalize2d, normalizeArray } = require('../tensor-utils-node') describe('tensor-utils-node', () => { it('normalize1d', () => { tf.tidy(() => { let v = tf.tensor1d([1, 0, 0]) let normalized = normalize1d(v) let expected = v assert.equal(normalized.equalStrict(expected).all().asScalar().dataSync()[0], 1) v = tf.tensor1d([1, 1, 1, 1]) normalized = normalize1d(v) expected = tf.tensor1d([1 / 2, 1 / 2, 1 / 2, 1 / 2]) assert.equal(normalized.equalStrict(expected).all().asScalar().dataSync()[0], 1) }) }) it('normalize1d 0-vector', () => { tf.tidy(() => { const zero = tf.tensor1d([0, 0, 0]) const normalized = normalize1d(zero) assert.equal(normalized.equalStrict(zero).all().asScalar().dataSync()[0], 1) }) }) it('normalize2d', () => { tf.tidy(() => { let m = tf.tensor2d([ [1, 0, 0, 0], [1, 1, 1, 1], ]) let normalized = normalize2d(m) let expected = tf.tensor2d([ [1, 0, 0, 0], [1 / 2, 1 / 2, 1 / 2, 1 / 2] ]) assert.equal(normalized.equalStrict(expected).all().dataSync()[0], 1) }) }) it('normalizeArray', () => { tf.tidy(() => { let v = [1, 0, 0] let normalized = normalizeArray(v) let expected = v assert.deepStrictEqual(normalized, expected) normalized = normalizeArray([1, 1, 1, 1]) expected = [1 / 2, 1 / 2, 1 / 2, 1 / 2] assert.deepStrictEqual(normalized, expected) }) }) })
0xDeCA10B/demo/client/src/ml-models/__tests__/tensor-utils-node.test.js/0
{ "file_path": "0xDeCA10B/demo/client/src/ml-models/__tests__/tensor-utils-node.test.js", "repo_id": "0xDeCA10B", "token_count": 683 }
3
import assert from 'assert' import { OnlineSafetyValidator } from '../validator' describe("OnlineSafetyValidator", () => { it("should validate", async () => { const validator = new OnlineSafetyValidator() // This is the example in the config but it might also pass // because online safety is disabled by an environment variable. const network = "private" const address = "0x1b88938102bE9ED97a0e9b8Cb321dD89C60e86Ab" assert(validator.isPermitted(network, address) === true) }) })
0xDeCA10B/demo/client/src/safety/__tests__/validator.test.ts/0
{ "file_path": "0xDeCA10B/demo/client/src/safety/__tests__/validator.test.ts", "repo_id": "0xDeCA10B", "token_count": 158 }
4
const NearestCentroidClassifier = artifacts.require("./classification/NearestCentroidClassifier")

const { convertData } = require('../../../src/float-utils-node')

contract('NearestCentroidClassifier', function (accounts) {
	// Scale factor used to represent fractional values as integers on-chain,
	// since Solidity has no native floating point.
	const toFloat = 1E9

	// Deployed contract instance, set once in `before`.
	let classifier

	// Scale `data` to fixed-point BNs and divide by the contract-computed norm,
	// i.e. produce the unit-length vector the contract expects.
	function normalize(data) {
		data = convertData(data, web3, toFloat)
		return classifier.norm(data).then(norm => {
			return data.map(x => x.mul(web3.utils.toBN(toFloat)).div(norm))
		})
	}

	// Convert a BN (or plain number) returned by the contract to a JS number.
	function parseBN(num) {
		if (web3.utils.isBN(num)) {
			return num.toNumber()
		} else {
			assert.typeOf(num, 'number')
			return num
		}
	}

	// Convert a fixed-point BN back to a JS float by undoing the `toFloat` scaling.
	function parseFloatBN(bn) {
		assert(web3.utils.isBN(bn), `${bn} is not a BN`)
		// Can't divide first since a BN can only be an integer.
		return bn.toNumber() / toFloat
	}

	// Deploy a fresh classifier with two classes and simple opposing centroids.
	before("deploy classifier", function () {
		const classifications = ["ALARM", "WEATHER"]
		const centroids = [
			convertData([-1, -1], web3, toFloat),
			convertData([+1, +1], web3, toFloat),
		]
		const dataCounts = [2, 2]
		return NearestCentroidClassifier.new(classifications, centroids, dataCounts).then(c => {
			classifier = c
		})
	})

	// The class names stored on-chain should match what was deployed.
	it("...should get the classifications", function () {
		const expectedClassifications = ["ALARM", "WEATHER"]
		return classifier.getNumClassifications().then(parseBN).then(numClassifications => {
			assert.equal(numClassifications, expectedClassifications.length, "Number of classifications is wrong.")
			let promises = expectedClassifications.map((_, i) => {
				return classifier.classifications(i)
			})
			return Promise.all(promises).then(results => {
				assert.deepEqual(results, expectedClassifications, "Wrong classifications.")
			})
		})
	})

	// A point near centroid 0 ([-1, -1]) should be classified as class 0.
	it("...should predict the classification", function () {
		const data = [-1.5, -0.5]
		return normalize(data).then(data => {
			return classifier.predict(data).then((prediction) => {
				assert.equal(prediction, 0, "Wrong classification.")
			})
		})
	})

	// A point near centroid 1 ([+1, +1]) should be classified as class 1.
	// NOTE(review): this `it` title duplicates the previous test's title.
	it("...should predict the classification", function () {
		const data = [+0.5, +1.5]
		return normalize(data).then(data => {
			return classifier.predict(data).then((prediction) => {
				assert.equal(prediction, 1, "Wrong classification.")
			})
		})
	})

	// After an update, the centroid should move to the running mean:
	// (old * n + sample) / (n + 1), and the data count should increment.
	it("...should train", function () {
		const data = [+4, +4]
		const classification = 1
		return normalize(data).then(normalizedData => {
			// Snapshot each centroid component before the update.
			const promises = data.map((_, dimension) => {
				return classifier.centroids(classification, dimension).then(parseFloatBN)
			})
			return Promise.all(promises).then(originalCentroidValues => {
				return classifier.dataCounts(classification).then(parseBN).then(originalDataCount => {
					return classifier.update(normalizedData, classification).then(() => {
						return classifier.dataCounts(classification).then(parseBN).then(dataCount => {
							assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
							// Every component must equal the incremental mean.
							const promises = normalizedData.map((dataVal, dimension) => {
								return classifier.centroids(classification, dimension).then(parseFloatBN).then(v => {
									assert.equal(v, (originalCentroidValues[dimension] * originalDataCount + parseFloatBN(dataVal)) / dataCount, `value for centroid[${dimension}]`)
								})
							})
							return Promise.all(promises)
						})
					})
				})
			})
		})
	})

	// Same running-mean check as above, but exercising negative fixed-point values.
	it("...should train negative numbers", function () {
		const data = [-4, -4]
		const classification = 0
		return normalize(data).then(normalizedData => {
			const promises = data.map((_, dimension) => {
				return classifier.centroids(classification, dimension).then(parseFloatBN)
			})
			return Promise.all(promises).then(originalCentroidValues => {
				return classifier.dataCounts(classification).then(parseBN).then(originalDataCount => {
					return classifier.update(normalizedData, classification).then(() => {
						return classifier.dataCounts(classification).then(parseBN).then(dataCount => {
							assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
							const promises = normalizedData.map((dataVal, dimension) => {
								return classifier.centroids(classification, dimension).then(parseFloatBN).then(v => {
									assert.equal(v, (originalCentroidValues[dimension] * originalDataCount + parseFloatBN(dataVal)) / dataCount)
								})
							})
							return Promise.all(promises)
						})
					})
				})
			})
		})
	})

	// Adding a class should emit an AddClass event, append the class name,
	// and record the supplied data count at the new index.
	it("...should add class", function () {
		const centroid = [-1, +1]
		const newClassificationName = "NEW"
		const dataCount = 2
		return classifier.getNumClassifications().then(parseBN).then(originalNumClassifications => {
			return classifier.addClass(convertData(centroid, web3, toFloat), newClassificationName, dataCount).then(info => {
				const events = info.logs.filter(l => l.event == 'AddClass')
				assert.lengthOf(events, 1)
				const event = events[0]
				assert.equal(event.args.name, newClassificationName)
				// The new class is appended, so its index equals the previous count.
				const newClassificationIndex = parseBN(event.args.index)
				assert.equal(newClassificationIndex, originalNumClassifications)
				return classifier.getNumClassifications().then(parseBN).then(newNumClassifications => {
					assert.equal(newNumClassifications, originalNumClassifications + 1)
					return classifier.classifications(newClassificationIndex).then(className => {
						assert.equal(className, newClassificationName)
						return classifier.dataCounts(newClassificationIndex).then(parseBN).then(foundDataCount => {
							assert.equal(foundDataCount, dataCount)
						})
					})
				})
			})
		})
	})

	// Extending a centroid should append the new components after the existing ones.
	it("...should extend centroids", async function () {
		const classification = 0
		const extension = [2, 2]
		// Read the current 2-dimensional centroid before extending.
		const originalCentroidValues = await Promise.all([...Array(2).keys()].map(dimension => {
			return classifier.centroids(classification, dimension).then(parseFloatBN)
		}))
		// `Array.prototype.concat(a, b)` concatenates onto an empty receiver,
		// i.e. it yields [...a, ...b].
		const expectedCentroidValues = Array.prototype.concat(originalCentroidValues, extension)
		await classifier.extendCentroid(convertData(extension, web3, toFloat), classification)

		for (let dimension = 0; dimension < expectedCentroidValues.length; ++dimension) {
			const v = await classifier.centroids(classification, dimension).then(parseFloatBN)
			// closeTo with a 1/toFloat tolerance absorbs fixed-point rounding.
			assert.closeTo(v, expectedCentroidValues[dimension], 1 / toFloat, `value for centroid[${dimension}]`)
		}
	})
})
0xDeCA10B/demo/client/test/contracts/classification/nearestcentroidclassifier.js/0
{ "file_path": "0xDeCA10B/demo/client/test/contracts/classification/nearestcentroidclassifier.js", "repo_id": "0xDeCA10B", "token_count": 2205 }
5
#!/bin/bash
# Fetch OpenZeppelin library contracts and patch their pragma for this project.
set -ex

# Get some libraries from OpenZeppelin.
# Can't install these through npm since they use the 0.5 version of solidity.
# Pin an exact commit so the fetched sources are reproducible.
commit="04a1b21874e02fd3027172bf208d9658b886b658"

safe_math_file="client/lib/SafeMath.sol"
# Quote the $(...) substitution so paths with spaces cannot be word-split;
# -p is the portable spelling of --parents and does not fail if the dir exists.
mkdir -p "$(dirname "${safe_math_file}")"
wget "https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-contracts/${commit}/contracts/math/SafeMath.sol" --output-document "${safe_math_file}"

signed_safe_math_file="client/lib/SignedSafeMath.sol"
mkdir -p "$(dirname "${signed_safe_math_file}")"
wget "https://raw.githubusercontent.com/OpenZeppelin/openzeppelin-contracts/${commit}/contracts/drafts/SignedSafeMath.sol" --output-document "${signed_safe_math_file}"

# Change the first line to use the right compiler version.
sed -i "1s/.*/pragma solidity ^0.6;/" "${safe_math_file}" "${signed_safe_math_file}"
0xDeCA10B/demo/setup_libs.sh/0
{ "file_path": "0xDeCA10B/demo/setup_libs.sh", "repo_id": "0xDeCA10B", "token_count": 299 }
6
from collections import Counter

from injector import inject
from sklearn.neighbors import NearestCentroid


# Purposely not a singleton so that it is easy to get a model that has not been initialized.
@inject
class NearestCentroidClassifier(NearestCentroid):
    """
    A Nearest Centroid classifier extended with incremental updates.

    Tracks how many samples contributed to each centroid so that
    `partial_fit` can move a centroid to the new running mean.
    """

    def fit(self, X, y):
        """
        Fit on the full dataset and record the per-class sample counts.

        The counts are required later by `partial_fit` to compute the
        incremental mean for each centroid.
        """
        self._num_samples_per_centroid = Counter(y)

        super().fit(X, y)

    def partial_fit(self, training_data, labels):
        """
        Update one class's centroid with a single new sample.

        Uses the running-mean formula: new = (old * n + sample) / (n + 1).
        """
        # Assume len(training_data) == len(labels) == 1
        # Assume centroids are indexed by class 0-N.
        sample = training_data[0]
        label = labels[0]
        n = self._num_samples_per_centroid[label]
        # sklearn stores fitted centroids in `centroids_`, one row per class
        # (assumes class labels are the integers 0..N, matching row indices).
        self.centroids_[label] = (self.centroids_[label] * n + sample) / (n + 1)
        self._num_samples_per_centroid[label] = n + 1
0xDeCA10B/simulation/decai/simulation/contract/classification/ncc.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/contract/classification/ncc.py", "repo_id": "0xDeCA10B", "token_count": 303 }
7
# Objects for all smart contracts. from dataclasses import dataclass, field from typing import Optional from injector import singleton Address = str """ An address that can receive funds and participate in training models. """ @dataclass class Msg: """ A message sent to a smart contract. :param sender: The sender's address. :param value: Amount sent with the message. """ sender: Address # Need to use float since the numbers might be large. They should still actually be integers. value: float class RejectException(Exception): """ The smart contract rejected the transaction. """ pass class SmartContract(object): """ A fake smart contract. """ def __init__(self): self.address: Address = f'{type(self).__name__}-{id(self)}' """ The address of this contract. """ self.owner: Optional[Address] = None """ The owner of this contract. """ @singleton @dataclass class TimeMock(object): """ Helps fake the current time (in seconds). Ideally the value returned is an integer (like `now` in Solidity) but this is not guaranteed. Normally in an Ethereum smart contract `now` can be called. To speed up simulations, use this class to get the current time. """ _time: float = field(default=0, init=False) def __call__(self, *args, **kwargs): """ Get the currently set time (in seconds). """ return self._time def add_time(self, amount): """ Add `amount` (in seconds) to the current time. """ self._time += amount def set_time(self, time_value): """ Set the time to return when `time()` is called. """ self._time = time_value def time(self): """ Get the currently set time (in seconds). """ return self._time
0xDeCA10B/simulation/decai/simulation/contract/objects.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/contract/objects.py", "repo_id": "0xDeCA10B", "token_count": 617 }
8
import html
import itertools
import os
from collections import Counter
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import numpy as np
import pandas as pd
import requests
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from tqdm import tqdm

from .data_loader import DataLoader
from .featuremapping.hashing.token_hash import TokenHash


@inject
@dataclass
class OffensiveDataLoader(DataLoader):
    """
    Load offensive data from https://github.com/t-davidson/hate-speech-and-offensive-language.
    """

    _logger: Logger
    _token_hash: TokenHash

    # Maximum vocabulary size for the TF-IDF feature selection.
    max_num_features: int

    _seed: int = field(default=2, init=False)

    _train_split: float = field(default=0.7, init=False)

    # Maps the dataset's 3 class labels (hate / offensive / neither)
    # to this loader's binary labels (0 = OFFENSIVE, 1 = SAFE).
    _class_mapping = [
        # Hate
        0,
        # Offensive
        0,
        # Neither (Safe)
        1,
    ]

    def classifications(self) -> List[str]:
        """Return the class names, indexed by label value."""
        return ["OFFENSIVE", "SAFE"]

    def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple):
        """
        Download (if needed), featurize, and split the dataset.

        :param train_size: Number of training samples; derived from `_train_split` if omitted.
        :param test_size: Number of test samples; derived from the remainder if omitted.
        :return: ((x_train, y_train), (x_test, y_test)) where the x's are CSR matrices
            of hashed-token counts and the y's are numpy label arrays.
        """
        self._logger.info("Loading data.")

        data_folder_path = Path(__file__, '../../../../training_data/offensive/hate-speech-and-offensive-language').resolve()
        if train_size is not None and test_size is not None:
            max_num_samples = train_size + test_size
        else:
            max_num_samples = None
        data_path = data_folder_path / 'labeled_data.csv'
        if not data_path.exists():
            # Cache the CSV locally on first use.
            data_url = 'https://github.com/t-davidson/hate-speech-and-offensive-language/raw/master/data/labeled_data.csv'
            self._logger.info("Downloading data from \"%s\" to \"%s\".", data_url, data_path)
            r = requests.get(data_url, allow_redirects=True)
            r.raise_for_status()
            os.makedirs(data_folder_path, exist_ok=True)
            with open(data_path, 'wb') as f:
                f.write(r.content)

        loaded_data = pd.read_csv(data_path)

        data = []
        labels = []
        # +1 because itertuples() prepends the Index as field 0.
        class_index = list(loaded_data.columns).index('class') + 1
        assert class_index > 0
        for row in tqdm(loaded_data.itertuples(),
                        desc="Loading data",
                        unit_scale=True, mininterval=2, unit=" samples",
                        total=max_num_samples or len(loaded_data),
                        ):
            # Use >= so that exactly `max_num_samples` samples are kept
            # (`>` would let one extra sample through before breaking).
            if max_num_samples is not None and len(data) >= max_num_samples:
                break
            text = row.tweet
            text = self._pre_process(text)
            data.append(text)
            labels.append(self._class_mapping[row[class_index]])

        # Fill in whichever split sizes were not given.
        if train_size is None:
            if test_size is None:
                train_size = int(self._train_split * len(data))
            else:
                train_size = len(data) - test_size
        if test_size is None:
            test_size = len(data) - train_size

        data, labels = shuffle(data, labels, random_state=self._seed)
        x_train = itertools.islice(data, train_size)

        # Compute the top features.
        t = TfidfVectorizer(max_features=self.max_num_features, norm=None)
        t.fit(tqdm(x_train,
                   desc="Computing top token features",
                   total=train_size,
                   unit_scale=True, mininterval=2, unit=" texts"
                   ))
        top_tokens = t.get_feature_names()
        self._logger.debug("Some top feature names: %s", top_tokens[:30])

        tokenize = t.build_analyzer()
        feature_tokens = set(t.get_feature_names())

        def _featurize(text: str) -> Dict[int, int]:
            # Count only the selected vocabulary tokens, keyed by their hash.
            result = Counter(tokenize(text))
            return {self._token_hash.hash(token): count
                    for token, count in result.items()
                    if token in feature_tokens}

        x_train = map(_featurize, itertools.islice(data, train_size))
        x_train = self._build_sparse_matrix(x_train)
        y_train = np.array(labels[:train_size])

        x_test = map(_featurize, itertools.islice(data, len(data) - test_size, len(data)))
        # TODO Might have to make sure it has the same number of columns as x_train.
        x_test = self._build_sparse_matrix(x_test)
        # Use an explicit start index instead of a negative slice: labels[-0:]
        # would return the entire list when test_size == 0.
        y_test = np.array(labels[len(labels) - test_size:])

        self._logger.info("Done loading data.")
        return (x_train, y_train), (x_test, y_test)

    def _pre_process(self, text: str) -> str:
        """
        Handle some simple pre-processing specific to this dataset.
        """
        return html.unescape(text)

    def _build_sparse_matrix(self, feature_mapped_data: Iterator[Dict[int, int]]):
        """Build a CSR matrix of token counts, one row per sample."""
        # Make a sparse matrix following the term-document example from:
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
        data = []
        indptr = [0]
        indices = []
        for feature_indices in feature_mapped_data:
            if len(feature_indices) > 0:
                i, d = zip(*feature_indices.items())
                indices.extend(i)
                data.extend(d)
            # Rows with no features still advance indptr (empty row in the matrix).
            indptr.append(len(indices))
        return csr_matrix((data, indices, indptr), dtype=np.uint8)


@dataclass
class OffensiveDataModule(Module):
    """Injector module providing a configured OffensiveDataLoader."""

    max_num_features: int = field(default=1000)

    @provider
    @singleton
    def provide_data_loader(self, builder: ClassAssistedBuilder[OffensiveDataLoader]) -> DataLoader:
        return builder.build(max_num_features=self.max_num_features)
0xDeCA10B/simulation/decai/simulation/data/offensive_data_loader.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/data/offensive_data_loader.py", "repo_id": "0xDeCA10B", "token_count": 2591 }
9
# Lab 4 - AllReduce的实现和优化 ## 实验目的 1. 理解并行训练的原理和实现 2. 定制一个新的并行训练的通信压缩算法 ## 实验环境 * Ubuntu 18.04 * PyTorch==1.5.0 (务必安装CPU版本) * OpenMPI * Horovod==0.19.4 ## 实验原理 深度学习中,分布式训练算法和分布式训练系统的基本知识 ## 实验内容 ### 实验流程图 ![](/imgs/Lab4-flow.png "Lab4 flow chat") ### 具体步骤 1. 安装依赖支持:OpenMPI, Horovod 2. 编写程序,使用Horovod库,增加数据并行训练支持 1. 参照Horovod with PyTorch参考文档,修改 `mnist_basic.py` 文件, 另存为 `pytorch_mnist_horovod.py`,使用Horovod库实现数据并行 - Mnist_basic.py原始文件地址:https://github.com/pytorch/examples/blob/master/mnist/main.py - Horovod with PyTorch文档地址:https://github.com/horovod/horovod/blob/master/docs/pytorch.rst 2. 记录每个step的运行时间和正确率(accuracy) 3. 理解Horovod的执行逻辑,利用Numpy实现float8(8bit), float16(16bit)编码方案的压缩/解压缩 1. 克隆GitHub上Horovod库 2. 修改 `/horovod/torch/compression.py` 文件,增加Bit8Compressor和Bit16Compressor类,实现compress和decompress函数。(提示:torch.Tensor没有8-bit float类型支持,所以Bit8Compressor还需实现float32和float8类型的相互转化) 4. 修改Horovod库中代码,增加对float8(8bit), float16(16bit)格式的压缩 1. 修改 `/horovod/torch/mpi_ops.py` 文件,利用Horovod内嵌的AllGather通信和压缩接口,增加对float8(8bit), float16(16bit)格式的压缩代码的调用。 2. 重新build Horovod库。 5. 修改MNIST样例代码,增加压缩功能。 6. 测试代码正确性,比较原始代码、数据并行、加入压缩算法三者的性能差别。 7. [选做项目] 利用C++/CUDA API实现更为高效的压缩/解压缩编码 ## 实验报告 ### 实验环境 |||| |--------|--------------|--------------------------| |硬件环境|服务器数目|&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | ||网卡型号、数目|| ||GPU型号、数目|| ||GPU连接方式|| |软件环境|OS版本|| ||GPU driver、(opt. 
NIC driver)|| ||深度学习框架<br>python包名称及版本|| ||CUDA版本|| |||| ### 实验结果 比较原始串行训练,用Horovod并行训练,加入压缩算法三者,在同样epoch条件下的训练时间和结果正确率。 Epoch size: ___________ ||||| |-----|-----|-----|-----| | 训练算法 || &nbsp; &nbsp; &nbsp; &nbsp; 训练时间 &nbsp; &nbsp; &nbsp; &nbsp; | &nbsp; &nbsp; &nbsp; &nbsp; 结果正确率 &nbsp; &nbsp; &nbsp; &nbsp; | |串行训练|||| | 用Horovod并行 | Device# == 2 ||| ||Device# == 4||| | float8(8bit)压缩 | Device# == 2 ||| || Device# == 4 ||| | float16(16bit)压缩 | Device# == 2 ||| || Device# == 4 ||| ||||| ## 参考代码 ### 安装Horovod 安装OpenMPI:`sudo apt install openmpi-bin` 安装Horovod:`python3 -m pip install horovod==0.19.4 --user` ### 利用Horovod并行化pytorch MNIST模型训练 1. Device# == 1 运行命令:`python3 pytorch_mnist_horovod.py` 2. Device# == N (e.g., N == 2, 4, 6, 8) 运行命令:`horovodrun -n 2 python3 pytorch_mnist_horovod.py –hvd True ` 参考代码: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py ### 基于Horovod(v0.19.4)库增加bit-16和bit-8的并行训练的通信压缩算法 1. Build Horovod 运行命令:`HOROVOD_WITHOUT_MXNET=1 HOROVOD_WITHOUT_GLOO=1 HOROVOD_WITHOUT_TENSORFLOW=1 HOROVOD_WITH_PYTORCH=1 python setup.py build` 2. 在horovod库中需要修改的文件和代码片段: bit8,bit16.git_diff 3. 执行压缩算法进行训练 ``` mpirun -n 2 python pytorch_mnist_compress.py --bit8-allreduce mpirun -n 2 python pytorch_mnist_compress.py --bit16-allreduce ``` ## 参考资料 * Horovod with PyTorch 文档: https://github.com/horovod/horovod/blob/master/docs/pytorch.rst * Horovod MNIST并行训练参考代码:https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
AI-System/Labs/BasicLabs/Lab4/README.md/0
{ "file_path": "AI-System/Labs/BasicLabs/Lab4/README.md", "repo_id": "AI-System", "token_count": 2540 }
10
# 常见问题 ## 1. 构建部署PyTorch训练程序时出现 "BADSIG F60F4B3D7FA2AF80" 错误 ### 运行的命令 `docker build -f Dockerfile.gpu -t train_dl .` ### 错误日志 ```bash W: GPG error: https://developer.download.nvidia.cn/compute/machine-learning/repos/ubuntu1804/x86_64 Release: The following signatures were invalid: BADSIG F60F4B3D7FA2AF80 cudatools <[email protected]> E: The repository 'https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release' is not signed. ``` ### 原因 NVIDIA 的 CDN 更新了新的 GPG 签名缓存 `Release.gpg`,但对应的本体 `Release` 并未更新,这会造成上述错误。 ### 解决方法 解除 `Dockerfile.gpu` 内 `# 解决网络问题` 下命令的注释,令其运行将下载源换为 Aliyun 源,可以解决问题。
AI-System/Labs/BasicLabs/Lab5/issue.md/0
{ "file_path": "AI-System/Labs/BasicLabs/Lab5/issue.md", "repo_id": "AI-System", "token_count": 433 }
11
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 10.2.2 (分布式)强化学习对框架的需求和挑战 - [10.2.2 (分布式)强化学习对框架的需求和挑战](#1022-分布式强化学习对框架的需求和挑战) - [强化学习系统面临的挑战和机器学习系统相比,有什么不同?](#强化学习系统面临的挑战和机器学习系统相比有什么不同) - [强化学习对于框架的**需求**有哪些呢?](#强化学习对于框架的需求有哪些呢) - [小结与讨论](#小结与讨论) - [参考文献](#参考文献) ## 强化学习系统面临的挑战和机器学习系统相比,有什么不同? 首先, 为什么强化学习不能复用过去的机器学习或者深度学习的平台呢?事实上,在Github上关于强化学习的开源库有将近2w+个。但是大量的框架,都是难以复用的代码。 主要的原因有以下几点如下: - 强化学习算法的**可复现性**比较差。随机种子,参数,具体实现的差别等因素的变化,都可能会对强化学习的结果有较大的影响。 - 例如,在Rainbow[<sup>[2]</sup>](#rainbow) 里介绍可以结合DQN使用的六种技巧, 包括:Dueling DQN, DDQN, Prioritized DQN, Noisy DQN等。从图10.2.8上我们可以看出,应用不同的技巧会使得DQN的收敛值发生改变。同时采用6种技巧的Rainbow可以得到超越其他曲线的效果。这些技巧反映在代码上可能仅仅有几行代码的差别。 - 而另一篇文章[<sup>[1]</sup>](#implementation_ppo)里,给PPO带来真正的性能上提升以及将策略(policy)约束在信任域)trust region)内的效果,不是通过PPO论文中提出的对新策略和原策略的比值进行裁切(clip)带来的,而是通过代码层面(code-level)的一些技巧带来的。这侧面印证了,实现的不同实现也会对强化学习有比较大的影响。 <div align="center"> <img src="./img/trick_of_rainbow.png" ch="500" width="60%"/> </div> <div align=center>图10.2.8 结合了不同技巧的DQN的表现 </div> - 强化学习的**执行策略**多种多样。这个执行策略包括:运行的硬件或者机器(例如:GPU或者CPU, 单机或者集群),模块之间的交互模式(同步或者异步),通信的内容(数据或者梯度),通信架构的实现(多进程,MPI或者参数服务器)等。 <!-- <div align="center"> <img src="./img/exection_mode.png" ch="500" width="60%"/> </div> <center>图10.2.2.2 强化学习里不同的执行策略维度 </center> --> - 在传统的机器学习或者深度学习里,数据集通常是固定的。模型的任务就是拟合数据以求解最优解或者极优解,因此模型可以只跑在单一的机器(例如GPU)上。而强化学习因为数据是边采样得到的,采样和训练可以在不同的硬件或者机器上。例如,在IMPALA算法架构里,采样器的推理部分跑在128个CPU上,而学习器的模型训练部分跑在1个GPU上。 - 同时,根据采样和训练过程是否解耦,可以将强化学习分为同步算法或者异步算法。例如,PPO是一种同步算法,因为它会在模型采样到一定量的数据后开始训练,然后继续采样,往复迭代直到收敛。而ApeX是一种异步算法,它的采样进程将样本存储到重放缓冲区里,而训练进程从里面异步地采样数据进行训练。在ApeX-DQN里,采样过程和训练过程是异步进行交互的。 - 不同的强化学习算法的**结构差异**很大。 下图来源于RLlib[<sup>[3]</sup>](#rllib),从下表里可以看出,不同的算法族群,他们的架构差异是很大的,体现在是否使用以下组件上:策略评估(Policy Evaulation),重放缓冲区(Replay Buffer),基于梯度优化器(Gradient-based Optimizer),以及其他异构的难以归类的组件。 | 算法类 | 需要策略评估 | 有重放缓冲区 | 有基于梯度的优化器 | 其他的模块| | ---- | ---- | ---- | ---- | ---- | | DQN Based | &#10004; | &#10004; | &#10004; 
| | | Actor-critic Based | &#10004; | &#10004; | &#10004; | | | Policy Gradient | &#10004; | &#10004; | &#10004; | | | Model Based | &#10004; | &#10004; | &#10004; | Model-based Planning| | Multi-agent | &#10004; | &#10004; | &#10004; | | | Evolutionary Methods | &#10004; | | | Derivate Optimization| | AlphaGo | &#10004; | &#10004; | &#10004; | MCTS; Derivate Optimization| - 分布式强化学习**算法和架构互相影响**。新的架构通常可以让算法在原来的基础上跑得更快,但因此可能会带来收敛不好的问题;而通常需要提出新的算法,来解决新架构带来的收敛问题。例如,IMPALA提出了V-trace算法。该算法显著降低了(由和目标策略不一样的行为策略生成的)训练样本带来的影响,从而使得算法相比之前的工作,能在速度和效果获得提升。 我们重新总结以上四点强化学习对框架的**挑战**, 即: - 强化学习算法复现比较困难; - 分布式强化算法的执行策略多种多样; - 不同的强化算法结构存在差异; - 分布式强化算法和架构互相影响和变化; 但是,由于大部分的开源框架都是针对特定的算法和架构模式开发的,因此这些开源框架难以适配到不同的分布式强化学习算法下,难以满足通用分布式强化学习框架的需求。 ## 强化学习对于框架的**需求**有哪些呢? 通过对于挑战的分析,我们可以总结出以下需求(包括但不仅限于): - 强化学习框架需要有较好的可扩展性;良好的可扩展性可以让用户模块化编程,使得编程更高效清晰。 可扩展性的需求体现在以下几个方面(包括但不限于): - 通用的用户友好的强化学习算法接口;不同的强化学习的算法的结构差异很大,导致当前的开源框架难以用一个统一的接口去实现大部分的算法,因而用户难以在一个框架上自定义算法。这也是导致Github上许多用户自己开发特定框架的原因。 - 支持可复现各种强化学习算法和架构; - 支持不同的强化学习的执行策略; - 高效率高并发的数据采集; 在传统的机器学习里,数据集通常都是预先定义好的。和传统机器学习不同,强化学习需要迭代地收集数据和训练数据,并且自主地决定采样什么样地数据。因而,数据量的大小和模型的效果相关,采样数据的效率是强化学习的关键; 由于工业界的环境可能是在单独的服务器或者机器上,因而与环境交互的时间可能会会较长,而导致采样进程的效率低下。将采样过程并行化通常是一个可行的策略,但同时也给用户带来了分布式编程的成本。 ELF[<sup>[5]</sup>](#elf)等工作利用C++ 线程并行托管多个环境实例,为用户提供了一个高效的轻量级的环境模拟库。这些工作的好处是免去了用户的开发成本,但同时他们也自身的局限。他们的局限在于只能支持特定领域的模拟器; 高效率高并发的数据采集的需求进一步划分包括但不仅限于: - 支持与环境的多种交互方式。例如,是把数据推送(push)给环境,还是主动从环境中拉取(pull)数据; - 提供易用的分布式编程模式(programming API),减少用户的开发成本; - 高性能的通信框架; 在强化学习尤其是分布式强化学习里,由于模块较多,模块可能分布在不同的硬件上,因而需要在不同的硬件之间传输数据。同时,不同的模块之间通信的传输的信息量可能跨度很大。 例如在图10.2.9中,ApeX的架构里,采样器的进程会将采样到的数据,从内存里传送到GPU的内存上;而学习器的进程会将更新的模型参数,从GPU的内存里传送到内存里给采样器进程做推理。 高性能的通信框架的需求进一步划分包括但不仅限于: - 支持简单易用的通信接口。 - 减少上下文切换的代价。 - 优化数据的传输。 例如,利用压缩技术,增加传输的吞吐量(throughput)或者减少传输数据的大小。 <div align="center"> <img src="./img/context_switch.png" ch="500" width="80%"/> </div> <div align=center>图10.2.9 ApeX架构里的上下文切换 </div> 另外,有部分强化学习开源框架(例如Surreal[<sup>[4]</sup>](#surreal))也在为强化学习的可复现性而努力,包括支持可复现的强化学习算法,提出一些支持复现的数据集等等。 ## 
小结与讨论 在本章小节里,我们讨论了强化学习框架和系统面临的种种挑战。相比于深度学习框架来说,强化学习框架更具有挑战性。而面对这些挑战,我们提出了当前强化学习框架和系统面临的需求,并且给出了部分当前框架里的解决方案和思路。 ## 参考文献 <div id="implementation_ppo"></div> 1. Engstrom, Logan, et al. "Implementation matters in deep policy gradients: A case study on PPO and TRPO."  <div id="rainbow"></div> 2. Hessel M, Modayil J, Van Hasselt H, et al. Rainbow: Combining improvements in deep reinforcement learning[C.//Thirty-second AAAI conference on artificial intelligence. 2018. <div id="rllib"></div> 3. Liang, Eric, et al. "Ray rllib: A composable and scalable reinforcement learning library." <div id="surreal"></div> 4. Fan L, Zhu Y, Zhu J, et al. Surreal: Open-source reinforcement learning framework and robot manipulation benchmark[C.//Conference on Robot Learning. PMLR, 2018: 767-782. <div id="elf"></div> 5. Tian Y, Gong Q, Shang W, et al. Elf: An extensive, lightweight and flexible research platform for real-time strategy games[J.. Advances in Neural Information Processing Systems, 2017, 30.
AI-System/Textbook/第10章-强化学习系统/10.2.2-分布式强化学习对框架的需求和挑战.md/0
{ "file_path": "AI-System/Textbook/第10章-强化学习系统/10.2.2-分布式强化学习对框架的需求和挑战.md", "repo_id": "AI-System", "token_count": 6571 }
12
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 11.3 模型压缩与硬件加速 模型压缩的一个潜在缺点是,部分经过压缩后的模型并不一定适用于传统的通用硬件,如CPU和GPU,往往需要定制化硬件的支持。 例如对于模型稀疏化后的网络模型来说,如果没有专用的稀疏计算库或者针对稀疏计算的加速器设计,则无法完全发挥稀疏所能带来的理论加速比。 对于经过数值量化之后的网络模型,很多硬件结构并不支持低比特运算,例如CPU 中只支持Short,Int,Float,Double等类型,而定制化硬件可以对不同比特的网络进行特定的支持。 因此,相关的研究工作如稀疏神经网络加速器和低比特神经网络加速器也被相继提出。 - [11.3 模型压缩与硬件加速](#113-模型压缩与硬件加速) - [11.3.1 深度学习专用硬件](#1131-深度学习专用硬件) - [11.3.2 稀疏模型硬件加速](#1132-稀疏模型硬件加速) - [结构化稀疏与非结构化稀疏](#结构化稀疏与非结构化稀疏) - [半结构化稀疏](#半结构化稀疏) - [11.3.2 量化模型硬件加速](#1132-量化模型硬件加速) - [小结与讨论](#小结与讨论) - [参考文献](#参考文献) ## 11.3.1 深度学习专用硬件 深度学习模型不仅仅是学术研究前沿,在工业和生活中也应用广泛,具有广阔的市场前景和经济价值。 因此其重要性也促使了针对深度学习的领域专用架构或AI专用芯片的出现和发展。 基于ASIC(Application Specific Integrated Circuit)实现AI专用芯片,可以在芯片电路级别对深度学习模型的计算和访存特性进行全面深度定制,相比于GPGPU可以达到更高的性能和效能提升。 当然AI专用芯片也损失了一定的通用性和灵活性。 以 2015 年的谷歌TPU芯片为代表和开端,AI芯片进入了发展的爆发期。 TPU也是AlphaGo战胜人类顶尖围棋选手李世石、柯洁的幕后英雄,相比较于CPU+GPU的硬件平台计算和反应速度更快。TPU也为谷歌在其他人工智能领域的创新和突破起到重要支撑作用。 得益于TPU在芯片内部定制了专用于矩阵乘法计算的脉动阵列架构,因此实现了比同时期CPU和GPU更高的计算效率。 为深度学习定制的体系结构和硬件加速器可以实现更高效的计算单元和达到更高的并行度,减少通用计算设备中不必要的开销,从而使得深度学习计算达到更高的吞吐量、更低的延迟和更高的能效。 在之后的几年中,国内的互联网公司例如阿里巴巴、华为等也都设计了自己的AI专用芯片。 下图分别展示了谷歌 TPU芯片,阿里巴巴Hanguang芯片,华为Ascend芯片。 <center> <img src="./img/3/AI芯片.jpg" width="1000" height="260" /></center> <center>图11.3.1 AI芯片:谷歌TPU,阿里巴巴Hanguang,华为Ascend</center> ## 11.3.2 稀疏模型硬件加速 ### 结构化稀疏与非结构化稀疏 最早提出的模型稀疏化方法是细粒度的权值剪枝。深度神经网络中存在大量数值为零或者接近零的权值,模型剪枝合理的去除这些“贡献”很小的权值,在很多模型中能够讲模型大小压缩10倍以上,同时也意味着可以减少10倍以上的模型计算量。尽管听起来非常美好,现实并不尽如人意。模型剪枝带来的的稀疏性,从计算特征上来看非常的“不规则”,这对计算设备中的数据访问和大规模并行计算非常不友好。例如对GPU来说,我们使用cuSPARSE稀疏矩阵计算库来进行实验时,90%稀疏性(甚至更高)的矩阵的运算时间和一个完全稠密的矩阵运算时间相仿。也就是说,尽管我们知道绝大部分的计算是浪费的(90%稀疏性意味着性能提升的上限是10倍),却不得不忍受“不规则”带来的机器空转和消耗。 这种“不规则”的稀疏模式通常被称为非结构化稀疏(Unstructured Sparsity),在有些文献中也被称为细粒度稀疏(Fine-grained Sparsity)或随机稀疏(Random Sparsity)。 顺着这个思路,许多研究开始探索通过给神经网络剪枝添加一个“规则”的约束,使得剪枝后的稀疏模式更加适合硬件计算。 例如使非零值的位置分布不再是随机的,而是集中在规则的子结构中。 
相比较于细粒度剪枝方法针对每个权值进行剪枝,粗粒度剪枝方法以组为单位对权值矩阵进行剪枝,使用组内的最大值或平均值为代表一组权值的重要性。 这种引入了“规则”的结构化约束的稀疏模式通常被称为结构化稀疏(Structured Sparsity),在很多文献中也被称之为粗粒度稀疏(Coarse-grained Sparsity)或块稀疏(Block Sparsity)。 但这种方法通常会牺牲模型的准确率和压缩比。 结构化稀疏对非零权值的位置进行了限制,在剪枝过程中会将一些数值较大的权值剪枝,从而影响模型准确率。 “非规则”的剪枝契合了神经网络模型中不同大小权值的随机分布,这对深度学习模型的准确度至关重要。而这种随机分布是深度学习模型为了匹配数据特征,通过训练后所得到的固有结果,为了迎合计算需求而设定的特定稀疏分布会增加破坏模型表达能力的风险,降低模型的准确度和压缩比。大量的研究工作也验证了这个观点。 综上所述,深度神经网络的权值稀疏存在模型有效性和计算高效性之间的权衡,如图 11.3.2 所示。 非结构化稀疏模式可以保持高模型压缩率和准确率,但因为不规则的稀疏模式对硬件不友好,导致很难实现高效的硬件加速。 而结构化稀疏使得权值矩阵更规则更加结构化,更利于硬件加速,但同时因为对权值的空间位置分布进行了限制,牺牲了模型压缩率或准确率。 结构化稀疏在不损失模型准确率的情况下,所能达到的压缩率远低于非结构化稀疏,或者在达到相同压缩率的情况下,所能维持的模型准确率远低于结构化稀疏。 <center> <img src="./img/3/稀疏平衡.jpg" width="1000" height="600" /></center> <center>图11.3.2 稀疏模型有效性和计算高效性权衡</center> ### 半结构化稀疏 那么,我们如何设计一个更好的稀疏模式以同时实现模型有效性和计算高效性两个目标? 在模型有效性方面,为了能够达到高模型准确率和压缩率,稀疏模式应该在稀疏结构上增加很少的约束,以保持非零权值分布的随机性。 在计算高效性方面,为了实现高性能稀疏矩阵乘法计算,需要使非零权值分布具有规则性,以消除不规则访存和计算。 “随机”与“规则”看似一对矛盾的概念,非此即彼。如果要两者兼顾,就不得不各自有所损失。然而在深度神经网络中,“随机”是权值分布上的随机,并不完全等于计算上的随机。权值上的“随机”与计算上的“规则”并不是一个绝对矛盾的概念,这就给调和这一对矛盾提供了空间,让我们得以取得既快又准的稀疏模型。论文提出了一种在权值上“ 随机”但非常适合硬件计算的稀疏化方法-组平衡稀疏(Bank Balanced Sparsity)。 在组平衡稀疏矩阵中,矩阵的每一行被分成了多个大小相等的组,每组中都有相同的稀疏度,即相同数目的非零值。 下图举例说明了组平衡稀疏模式的结构并与非结构化稀疏和结构化稀疏进行了直观的比较。 在这个例子中,三个具有不同的稀疏结构的稀疏矩阵都是从图a中稠密权值矩阵剪枝得到的,稀疏度都是50%。 细粒度剪枝将所有权值排序并剪枝掉绝对值最小的50%的权值,从而得到了图 b 中的非结构化稀疏矩阵。 粗粒度剪枝针对 2x2 的权值块进行剪枝,每块权值的重要性由块平均值代表,从而得到了图 c 中的结构化稀疏(块稀疏)矩阵。 组平衡剪枝将每一个矩阵行分成了两个组,每个组内进行独立的细粒度剪枝,去除在每个组内绝对值最小的 50% 的权值,从而得到了d图中的组平衡稀疏矩阵。 <center> <img src="./img/3/稀疏比较.jpg" width="1000" height="500" /></center> <center>图11.3.3 不同稀疏模式的比较</center> 由于我们在每个bank内使用细粒度剪枝,因此能够很大程度地保留那些数值较大的权值,保持权值的随机分布,从而保持高模型准确率。同时这种方法得到的稀疏矩阵模式将矩阵进行了平衡的分割,这有利于硬件解决不规则的内存访问,并对矩阵运算实现高并行度。 近年来,GPU和专用AI芯片也逐渐开始支持稀疏神经网络。 英伟达在2020年发布了A100GPU,其稀疏张量核使用了一种称为细粒度结构化稀疏(Fine-grained Structured Sparsity)的权值稀疏模式。 英伟达提出的细粒度结构化稀疏与组平衡稀疏解决的是相同的模型有效性和计算高效性的权衡问题,采用了相似的设计思想,因此稀疏结构也非常相似。 图 5-7 介绍了英伟达提出的细粒度结构化稀疏与 A100 GPU 的稀疏张量核。 细粒度结构化稀疏也称之为 2:4 结构化稀疏(2:4 Structured Sparsity)。 在其剪枝过程中,权值矩阵首先被切分成大小固定为 4 
的向量,并且稀疏度固定为50%(2:4)。 2:4 结构化稀疏可以视为组平衡稀疏的一种特殊情况,即将组大小设置为4,将稀疏度设置为 50%。 英伟达将细粒度结构化稀疏应用到图像、语言、语音等任务的模型中,实验结构表明不会对模型准确率造成显著影响,在模型有效性上与组平衡稀疏的结论相一致。 基于 2:4 结构化稀疏,A100 GPU 可以实现两倍的理论加速比,印证了组平衡稀疏的计算高效性。 <center> <img src="./img/3/A100_sparse.jpg" width="1000" height="500" /></center> <center>图11.3.4 A100 GPU的稀疏方法 (<a href="https://www.nvidia.com/en-us/data-center/a100/">图片来源</a>)</center> 无论是组平衡稀疏,还是A100中提出的细粒度结构化稀疏,我们都可以将其称之为半结构化稀疏(Semi-structured Sparsity)。半结构化稀疏很好地解决了稀疏模型存在的模型有效性和计算高效性之间的权衡,应用也越来越广泛。 ## 11.3.2 量化模型硬件加速 对于量化模型的硬件加速方法较为直接,实现相应比特数的计算单元即可。 在处理器芯片中,低比特计算单元则可以使用更少的硬件资源在更低的延迟内得出计算结果,并且大大降低功耗。 TPU的推理芯片中很早就使用了INT8,在后续的训练芯片中也采用了BF16数制度。 英伟达从A100中已经集成了支持INT4,INT8,BF16的混合精度计算核心,在最新发布的H100中甚至支持了BF8。 ## 小结与讨论 将软硬件分离,单独从硬件端进行定制优化或从软件端进行算法优化并不足以弥补算力供需之间的差距。 为了实现高性能人工智能计算,需要将算法和硬件的设计及优化统一起来,同时挖掘算法和硬件的潜力。 在算法设计时结合硬件平台的特性对算法进行优化从而减少对算力的需求,减轻硬件的负担。 进一步在硬件设计时根据算法的特性定制计算和存储微结构,最终实现性能的提升。 在可预见的未来,不仅仅是人工智能领域发展更加迅速,应用更加广泛,其他领域例如物联网、区块链等也将蓬勃发展,软硬件协同设计将发挥更大的作用。 ## 参考文献 1. https://www.nvidia.com/en-us/data-center/a100/ 2. https://www.jiqizhixin.com/articles/2019-06-25-18
AI-System/Textbook/第11章-模型压缩与加速/11.3-模型压缩与硬件加速.md/0
{ "file_path": "AI-System/Textbook/第11章-模型压缩与加速/11.3-模型压缩与硬件加速.md", "repo_id": "AI-System", "token_count": 8734 }
13
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 13.2 学习增强系统的应用 我们介绍几个学习增强系统的代表性工作。本章包含以下内容: - [13.2 学习增强系统的应用](#132-学习增强系统的应用) - [13.2.1 流媒体系统](#1321-流媒体系统) - [13.2.2 数据库索引](#1322-数据库索引) - [13.2.3 系统性能和参数调优](#1323-系统性能和参数调优) - [13.2.4 芯片设计](#1324-芯片设计) - [13.2.5 预测性资源调度](#1325-预测性资源调度) - [小结与讨论](#小结与讨论) - [参考文献](#参考文献) ## 13.2.1 流媒体系统 流媒体系统允许用户通过网络边下载边播放视频,是如今互联网的重要应用场景。为了优化用户体验,流媒体运营商广泛采用了根据用户网络状况而动态调整视频码率的自适应流(Adaptive Bitrate Streaming)。这种部署在自适应流的动态码率调整算法被称为码率自适应(Adaptive Bit Rate)算法,简称 ABR 算法。 在编码视频时,自适应流媒体系统事先将视频切分成若干几秒钟长的切片,并逐一将视频切片编码为不同码率。同一切片的不同码率可以互相替代,只是高码率的切片通常画质更清晰,但文件大小也更大。在播放视频时,流媒体客户端中运行的 ABR 算法会根据实时网络状况向服务器依次请求拥有最合适码率的视频切片,以达到优化用户体验质量(Quality of Experience,QoE)的目的。简单来说,QoE 主要由视频的清晰度和流畅度组成;优化 QoE 的关键即是在不影响视频流畅度的同时播放更清晰的视频。 ABR 算法的难点之一在于用户的网络状况可能不稳定,带宽会随时间变化。比如,用户在移动网络下观看视频时带宽波动可能较大,这使得 ABR 算法难以简单地根据过去观测到的下载速度来预估未来的网络带宽,从而导致算法误选过高或过低的码率,带来视频卡顿或画质不清晰的问题。ABR 算法的难点之二在于其每一步的码率决策不只影响当下的视频切片,而是对未来的码率选择也有影响。举例来说,如果 ABR 算法选择请求了高码率的切片,但用户的网络同时突然变差,那么客户端本地的视频缓存则会逐渐耗尽,继而导致 ABR 算法只好请求一系列低码率的视频切片以避免潜在的卡顿。也就是说,优化长期平均的 QoE 需要 ABR 算法对未来提前做出规划、进行序列决策(Sequential Decision Making)。同时,这也意味着强化学习(Reinforcement Learning,RL)可以适用于此问题。 为了应对以上挑战、弥补传统 ABR 算法的缺陷,麻省理工学院和斯坦福大学先后提出了基于强化学习的 ABR 算法。这里,我们着重介绍来自斯坦福大学的工作 Puffer。然后,我们概述麻省理工学院的工作 Pensieve,并对比两者应用强化学习的异同之处。 Puffer 的作者为了在真实网络环境中研究视频流算法,首先搭建了一个大规模流媒体直播平台 ([puffer.stanford.edu](https://puffer.stanford.edu))。该平台允许美国的用户免费收看六个电视频道,并部署了多种 ABR 算法进行随机对照实验。迄今为止,Puffer 已有超过 20 万的真实用户注册,收集了上百年长度的视频观看数据。同时,Puffer 每天自动发布匿名后的实验数据供公众进行分析,也将平台开放给学术研究人员来测试新的 ABR 算法。此外,Puffer 论文中还提出了一种新的基于强化学习的 ABR 算法 —— Fugu。在不影响理解的前提下,我们简化并描述 Fugu 算法如下。 为了解决 ABR 算法的难点之一,Fugu 首先训练了一个用于传输时间预测的神经网络,简称 TTP(Transmission Time Predictor)。给定某码率视频切片的大小,TTP 能够从历史数据中学习到如何精确地预测该切片从服务器传输到客户端的时间。TTP 的输入包含 ***(1)*** 最近下载的 *t* 个视频切片的大小;***(2)*** 最近下载的 *t* 个视频切片的传输时间;***(3)*** Puffer 服务器上的 TCP 统计信息;以及 ***(4)*** 希望预测传输时间的视频切片大小。TTP 的输出正是对这一切片传输时间的预测。由于在数据收集时能够观测到下载过的所有切片的传输时间,于是在训练 TTP 
时自然地可以使用标准的监督学习(Supervised Learning)来最小化 TTP 的预测误差,即预测传输时间与真实传输时间的平均差距。 针对 ABR 算法的难点之二,Fugu 在线下训练好 TTP 网络之后,在线上决策时采用了一种传统的控制论算法 —— 模型控制预测(Model Predictive Control,MPC)算法。具体来说,MPC 向前考虑长度为 *h* 个切片的时间窗口,并枚举该窗口内所有的决策“路径”,即由 *h* 个切片的码率依次组成的序列(在实际运行中 Fugu 通过动态规划避免了枚举)。对于每条决策路径,MPC 模拟、计算出在该时间窗口内可获得的总 QoE,并选择 QoE 总和最大的路径。虽然如此,在执行完最优路径上的第一个决策后,即下载完指定码率的下个切片时,MPC 会重复相同的方法进行规划,以便纳入最新观测到的系统状态(如该切片的下载时间、当前视频缓存大小等),避免更多的决策误差。 结合起来,如下图所示,Fugu 在线上运行 MPC 算法时,不断从 Puffer 服务器上获取更新的系统状态,并反复调用 TTP 网络来计算出最大化 QoE 的码率选择。每隔一段时间后,Fugu 会整合服务器上最近收集到的数据,重新在扩展后的数据集上训练 TTP 网络,然后部署训练完毕的 TTP 在服务器上。总的来说,Fugu 这类依赖数据对环境(即自适应流系统)建模,然后基于学到的模型做规划和控制的方法被称为基于模型的强化学习(Model-based RL)。 <center><img src="./img/2/fugu.png" width="400"/></center> <center>图13-2-1. Fugu 使用基于模型的强化学习算法。</center> <br> 通过长达八个月在 Puffer 真实用户上运行的实验,作者发现 Fugu 的性能优于现有的其它四个算法,包括我们即将介绍的 Pensieve。由于 QoE 不存在统一的定义,作者分别比较了组成 QoE 的常见维度。结果显示,在所有 ABR 算法中,Fugu 实现了最高的视频画质(由一种衡量画质的 SSIM 参数描述),最小的画质浮动(相邻视频切片的平均 SSIM 差),以及接近最低的视频卡顿。此外,在 Fugu 被使用时,Puffer 用户的平均观看时长也高于其它算法。 相较于 Fugu 的基于模型的强化学习算法,Pensieve 则采用了典型的无模型的强化学习(Model-free RL)。无模型的强化学习不试图对环境建模(比如不去明确地预测未来网络的带宽或者相对应的视频切片的下载时间),而是通过与环境交互、依赖试错的方式来直接学习最优的策略。这类算法往往需要更多的训练数据、训练更不稳定、行为也更难解释,但好处是在环境复杂到无法建模时也可以学习,同时也避免了先学习模型、再进行控制所带来的潜在的双重误差。 <center><img src="./img/2/pensieve.png" width="550"/></center> <center>图13-2-2. 
Pensieve 的模型使用了 A3C。</center> <br> 简单来说,Pensieve 将希望学习的 ABR 算法参数化为一个神经网络,使其在每一步可以直接输出最优的切片码率。Pensieve 使用了名为 A3C(Asynchronous Advantage Actor Critic)的算法;作者指出选择 A3C 的原因为多个客户端的反馈可同时用于线上训练。图13.2.2 展示了模型的输入: ***(1)*** 之前 *k* 个切片的吞吐量;***(2)*** 之前 *k* 个切片的下载时间;***(3)*** 下个切片所有可选码率对应的大小;***(4)*** 当前视频的缓存大小;***(5)*** 该视频中仍未下载的切片数量(假设视频长度有限);***(6)*** 上一切片下载时的码率。为了训练模型,Pensieve 搭建了一个模拟自适应流的环境,包括模拟网络。在模型输出一个码率决策后,该模拟环境会模拟切片的下载、并计算出 QoE 作为模型的奖励(Reward)。随后,Pensieve 通过一种策略梯度(Policy Gradient)的算法通过反向传播(Backpropagation)将神经网络的参数向着期望奖励更高的方向进行调整。A3C 算法在一般策略梯度算法上做的优化在此不过多赘述;感兴趣的读者可以阅读原文。 与 Fugu 相比,Pensieve 必须与环境交互才能学习。由于在真实网络上训练速度太慢、成本太高,所以 Pensieve 只能搭建一个模拟环境(和网络)来用于训练。然而,由于模拟网络与真实网络不可避免地存在差异(Simulation-to-Reality Gap),Puffer 的作者发现这种差异导致了 Pensieve 的模型泛化(Generalization)能力变差,即部署在真实的自适应流系统后 QoE 不如预期。相比之下,Fugu 不需要与任何(模拟或者真实)环境交互,可以直接使用最终测试环境上收集到的数据来训练,所以不存在泛化问题、实际表现更好。这也是 Puffer 论文标题中 “Learning *in situ*” 的来历。 ## 13.2.2 数据库索引 索引技术在数据库中扮演着重要角色。索引是一种结构,来对数据库表中一个或多个列(例如人名的姓氏列)的值进行排序。索引的目的在于定位表里的数据,进而提升数据查询的效率。一个例子是范围查询,或返回所有首字母为 "L" 的姓氏值。如果没有索引,这些查询则需要遍历整个数据库表。 主流的索引实现通常是基于平衡树,即 B tree 或 B+ tree。平衡树的叶节点储存着数据的物理位置。由于平衡树的高度可以很小,每次的数据查询只需要几次的树查询。但是这些索引是广义目的的数据结构,没有利用到被索引数据的分布特征。所以,在一些极端情况下,它们可能会表现得较差。比如,当数据键值为从 1 递增到 n,如果使用 b-tree 索引,查询的时间复杂度为平衡树常见的 $O(log\ n)$。但是,理想的情况下,如果利用排序数据键值为位置的特性,则只需要 $O(1)$ 的复杂度。同样,索引的空间复杂度也只需要 $O(1)$,而不是平衡树常见的 $O(n)$。 我们可以先想想,为什么数据库索引有可能被机器学习这一类的方法来解决。美国麻省理工学院的学习索引(Learned Index)的动机就在于是否能够用模型,来学习到数据的分布特征,进一步提升数据查询的效率。学习索引要达到的是学习到一个映射函数,$f(key)$ $\rightarrow$ $pos$;将 $key$ 写成 $x$,$pos$ 写成 $y$,希望学习到一个模型 $f(x)$ 约等于 $y$。在上面的极端例子里,因为 $x$ 是排序过的,所以 $f$ 可以被看成是给数据抽象成 CDF。换句话说,学习索引的模型是学习此 CDF。 学习索引的作者首先尝试的方案是使用训练一个 2 层全连接的神经网络;每层 32 个单元,并使用 ReLU 作为激发函数。这神经网络的输入为搜索的键值,输出为物理位置的预测。实验结果表明,此模型每秒大概能执行 1250 预测。但这性能远远比不上 B-Tree 索引每秒大约 1111111 次的执行。作者指出了几个可能的原因:第一,TensorFlow 的设计在于有效地运行大模型,并不是小模型。第二,不像 B-tree,神经网络所有的单元都必须参与计算。第三,神经网络擅长于学习数据的宏观趋势;如果需要针对性地去学习数据里某一部分的细节,则会带来巨大的空间和运算开销。换句话说,这是一个数据空间变小以后模型的预测能力变差的问题。作者称此问题为 Last Mile。 基于以上的这三个问题,作者提出了 Learning Index Framework(LIF)。首先,LIF 转换 
TensorFlow 的模型到一个 C++ 的表达形式,来加速对小模型的推理。另外,作者提出了 Recursive Model Index (RMI) 的递归模型索引来解决 Last Mile 的问题。RMI是一种层级化的架构,包含许多个模型。每一层中的模型都接收键值作为输入,然后据所得到的预测来选择下一层需执行的模型。这流程一直持续到最后一层,然后 RMI 输出在最后一层模型对位置的预测。从概念上来说,每一个模型都可以看作是对键值空间的某一部分负责。而 RMI 在逐层选择的过程中,逐渐降低了预测误差。 <br/> <center><img src="./img/2/recursive_learned_index.png" width="550"/></center> <center>图13-2-3. 学习索引的 Recursive Model Index (RMI)。</center> <br/> 实验结果显示出,与 B Tree 相比,学习索引可以更快,消耗的空间也最多可以节省 99%。但是,学习索引目前假设静态工作负载,也就是数据库表只读而不写。虽然如此,学习索引并不是有意图地去替代现有的索引,而是提供了另外一种构建数据库索引的思路。值得注意的是,学习索引启发了之后很多的工作,比如 ALEX(如何实现高效地写入操作),APEX(如何在持久内存上实现学习索引),XStore(如何在分离式内存上实现学习索引)等等。 ## 13.2.3 系统性能和参数调优 现代系统里有很多的设定与配置参数。透过调整这些设定与配置参数,系统工程师可以改变系统的行为,进而提高系统效能。一个例子是 MySQL 数据库 —— MySQL 有着上百个参数,从缓存相关的(如 `query_cache_size`,`key_cache_block_size`,`read_buffer_size`),磁盘缓式写入相关的(如 `delay_key_write`,`flush_time`),并发线程相关的(如 `innodb_commit_concurrency`),到连接通信的(如 `max_connections`,`net_buffer_length`),等等。有趣的是,许多系统的设定与配置参数的数量有着增长的趋势。在 “*Understanding and Dealing with over-Designed Configuration in System Software*” 这篇论文里,作者对 MySQL 数据库,Apache 网页服务器,和 Hadoop 大数据运行系统做了一个调查来量化这趋势。比如,从 1999 到 2015 年,MySQL 的设定与配置参数从大约 200 到 450;从 1998 到 2014 年,Apache 的设定与配置参数从大约 150 到 600;从 2006 到 2014 年,Hadoop 的设定与配置参数从大约 20 到 180。另外,当我们考虑到一个大型系统可能是由许多个子系统组成(例如网页服务器和数据库),这些大型系统的参数数量以指数级地增长。 调整这些设定与配置参数需要工程师理解系统的行为是如何被每一个参数所影响。然而,参数和系统性能的关系是一个高维度的非线性空间,而这空间超出了人的理解能力。所以,对于工程师而言,他们不确定手调的设定与配置是否最优,也很难知道如何有效地找到最优的设定与配置。 我们可以先想想,为什么系统配置参数调优有可能被机器学习这一类的方法来解决。这是因为它可以被看成是一个空间搜索的问题,而这类问题能在贝叶斯优化(Bayesian Optimization,BO)的框架下被解决。简单来说,我们可以先对不同的系统设定与配置,来做性能评测。这些数据的采样可以被看成“系统设定与配置--性能”的空间采样。有了一定数量的数据后,我们对此空间进行非线性建模,进而去推断最有可能使系统效能最高的系统设定与配置。在这流程之上,贝叶斯优化的中心思想是利用已采集的数据,来评估应该继续采集哪一个系统性能评测,进而更进一步地加强模型的准确率。贝叶斯优化的优势在于可以用非常少的步数(每一步可以想成用一组性能评测来训练)就能找到比较好的系统配置参数。另一个优势是贝叶斯优化不需要求参数的导数。 接下来,我们从两个贝叶斯优化的角度,来探讨影响准确度的两个因素:***(1)*** 模型的选取, 和 ***(2)*** 空间的采样。 在模型的选取上,一个常见的做法是假设系统里大多数的配置参数的属性都为连续值,然后把需要探索的空间当作是一个连续空间,并用回归模型来为此连续空间建模。这假设在很多的系统里是成立的。有很多的工作都用高斯过程(Gaussian 
Process,GP)来作为这里的回归模型。一个原因是高斯过程模型能为每一个预测提供置信区间(Confidence Interval),而这讯息能为我们之后讨论的空间采样给予帮助。简单来说,高斯过程建模的方式是基于数据和数据之间的距离。这距离是由核函数所计算出来;常见的核函数包括径向基函数核(RBF kernel)和马顿核(Matérn kernel)。已知的数据(及训练数据)的距离为 0,模型最有把握预测对,所以 Confidence Interval 最小。未知的数据如果离已知的数据越远(由核函数来定义),模型越没把握预测对,所以 Confidence Interval 越大。值得注意的是,由于需要计算数据和数据之间的距离,高斯过程模型在高维和大规模的训练集情况下,训练和推断的时间会有显著增长。 讲到这里,我们提一个有趣的工作,DNGO(Deep Networks for Global Optimization)。虽然深度神经网络(DNN)无法提供像高斯过程一样的置信区间,但它的训练和推断的时间普遍比高斯过程短。DNGO 结合了 DNN 模型和高斯过程 —— 先独立训练 基于 DNN 的回归模型,然后把最后 DNN 的 输出层替换成 GP 的模型。根据 DNGO 作者的测试,DNGO 能达到 接近 DNN 的速度并能提供高斯过程的置信区间。 不光是模型,空间的采样也是非常地重要。如果只是基于纯随机采样,不是每一次的采样都能为建模提供有效的信息增益。理想的情况下,每一次的采样点都应该能补充之前已采样点所无法得到的信息。而,“探索--利用”(Exploration--Exploitation)是一个解决这采样问题的思维。简单来说,“探索--利用”尝试在探索不确定区域和开采当前已知区域之间,来进行权衡。前者让我们有机会在还没有充分探索的区域里找寻最优解(比如之前提到的“大”置信区间的区域),以期望获得更高的回报;后者让我们在相对已知的区域里(比如之前提到的“小”置信区间的区域),来找寻最优解。然而,我们必须思考什么时候应该在探索和利用之间切换,来尽可能快地找到全局最优解。对于这问题,几个常结合高斯过程的策略包括 Upper Confidence Bound (UCB),Expected Improvement (EI),Maximum Probability of Improvement (MPI)。首先,UCB 较为简单,它的策略是直接采样置信区间最大的区域。EI 的策略是寻找哪个未采样点,相比目前已采样的点,有着最显著的更优结果。EI 评估的方法在于利用每个未采样点的预测和置信区间,来计算未采样点可能达到的最优值。MPI 和 EI 有点类似,但它的策略是寻找哪个未采样点,有着最大的概率可以比目前已采样到更优的结果 最后,我们介绍一个为系统参数调优的“探索--利用”策略:Metis。Metis 解决了系统数据上的一个挑战,也就是性能评测可能存在噪声。换句话说,重复一个性能评测可能会得到不同的结果,尤其是像延迟一样的时间类指标。Metis 在探索和利用的基础之上,也考虑了重采样来保证模型的质量。在每一次选取下一个采样点时,Metis 会评估探索的采样点,利用的采样点,和重采样的采样点所带来的信息增益。简单来说,Metis 假设这些采样点被实际采样了,并使用现有模型预估的值来更新模型,来得到这些采样点可能为模型带来的变化。 <br/> <center><img src="./img/2/metis.png" width="550"/></center> <center>图13-2-4. 
此图以高斯过程模型为例子,来展示 “系统设定与配置--性能” 的空间。。另外,Metis 在探索和利用的基础之上,也考虑了重采样来保证模型的训练质量。</center> <br/> ## 13.2.4 芯片设计 芯片是电子设备中最重要的部分,早已植入到了我们生活中的方方面面,电脑、手机、汽车都离不开芯片的计算存储和控制。 芯片设计,也称之为集成电路设计,代表了人类科技与智慧的结晶。 芯片设计本身是一项复杂的系统工程,想象一下在指甲盖大小的区域上就集成了上百亿个晶体管,并且还需要对更微观的区域进行功能划分和相互通信。 由于芯片设计的流程复杂繁琐,周期长,其中的每一个步骤都离不开电子设计自动化(Electronic Design Automation, EDA)软件和算法的辅助。 芯片设计的三个核心目标是优化功耗、性能和面积(Power,Performance and Area,PPA),但三者之间需要相互取舍和权衡,即使是借助成熟的EDA工具和经验丰富的工程师,其结果也会有很大差异。 随着当前集成电路的集成规模不断扩大,优化PPA变得越来越具有挑战性。 近年来,随着AI技术的广泛应用,芯片设计公司和EDA软件提供商也在不断探索利用AI技术辅助芯片设计,提升芯片PPA和开发效率。 那么AI技术具体能够帮助解决芯片设计中的哪些问题呢?我们先看一下芯片设计的流程和步骤。 如图13-2-5所示,芯片设计流程可以主要分为前端设计(逻辑设计)和后端设计(物理设计)。 前端设计首先根据应用需求进行规格制定,确定架构,然后进行RTL代码编写,仿真验证,逻辑综合等步骤并生成门级网表。 后端设计主要包括布图规划,布局布线,时钟分析等步骤,经过功能验证后最终将门级网表转换为物理版图。 芯片代工厂根据物理版图在晶圆硅片上制造出实际的芯片。 AI技术几乎可以助力芯片设计流程中的每一个步骤。 谷歌、英伟达、三星和新思科技等公司近年来纷纷加入了使用AI技术辅助设计芯片的大潮,并且在部分场景中实现了媲美甚至超越人类工程师的性能,大幅缩短了芯片设计的开发周期。 新思科技在2020年推出了DSO.ai,旨在利用人工智能技术实现更好,更快,更便宜的芯片。 芯片设计的潜在设计空间巨大,对应的性能,功耗和面积(PPA)也不尽相同。DSO.ai利用强化学习等技术自动搜索设计空间中的最佳方案。 例如,新思科技与三星合作,使用DSO.ai设计手机芯片,不仅实现了更高水准的PPA并大幅缩减了设计周期。 英伟达对该领域的研究和探索主要包括了使用卷积神经网络,图神经网络进行设计空间探索,功耗分析,可步线性分析等。 谷歌也一直在研究如何使用人工智能算法辅助设计其AI芯片TPU,例如利用AI技术为不同网络设计最优的加速器前端架构,利用强化学习为芯片进行后端设计等。 <br/> <center><img src="./img/2/ic_flow.png" width="550"/></center> <center>图13-2-5. 
芯片设计流程。芯片设计公司和EDA软件提供商正在尝试使用AI技术助力芯片设计的各个步骤。</center> <br/> 接下来我们深入介绍一个如何利用强化学习进行后端设计中的布图规划(Floorplanning)的例子。 布图规划主要是完成芯片中宏单元(Macro)和标准单元的放置,是芯片设计中最复杂最耗时的阶段之一,并且对芯片的最终PPA有着重要的影响。 我们可以把芯片中的布图规划想象成城市中的建设规划。 如何在芯片中的放置各种单元就如同在城市中规划学校、医院、住宅和商务等功能区的地理位置。 布图规划的优化目标可以抽象为最小化布线中的线长(Wirelength),并且需要满足对布局密度(Density)和布线拥塞(Congestion)的约束。 类似于在城市规划中需要使得交通线路最合理最通畅,并且满足居住密度和绿化率等要求。 谷歌提出使用强化学习解决布图规划,通过训练一个强化学习智能体(RL Agent)完成单元的放置,如图13-2-6所示。 这个智能体可以根据芯片当前的布图结果决定下一个单元在芯片中的放置位置。 强化学习中的奖励函数对模型的效果和收敛速度起着至关重要的作用,谷歌采用线长,密度和拥塞的加权和作为奖励函数,以达到各指标之间的权衡。 从一个空芯片开始,AI智能体按从大到小的顺序依次放置单元,最终获得一个系统的奖励。根据这个奖励,系统不断地对放置策略进行优化。 谷歌采集了大量的芯片布图规划对该强化学习智能体进行训练,并透露最终基于强化学习的布图规划成功应用到了谷歌TPU的设计中,将原本需要工程师几个月完成的工作量缩短到六个小时内完成。 <center><img src="./img/2/floorplan.png" width="1000"/></center> <center>图13-2-6. 使用强化学习完成芯片设计中的布图规划。</center> 当前,AI技术应用到芯片设计还处于尝试和摸索的阶段,人们期待着AI能够解决更复杂的芯片设计问题,甚至可以端到端的完成芯片设计。 然而我们也必须意识到,现阶段AI技术在芯片设计中扮演的还是“助手”的角色,AI技术可以辅助芯片设计的某一个步骤,但是并不能主导芯片设计或者拥有完全自主决策的能力。 我们相信随着AI技术本身的发展,和更多的AI技术应用到更多的芯片设计工作中,芯片设计的效率和性能会取得更大的突破。 ## 13.2.5 预测性资源调度 "The public cloud is the most powerful general-purpose computer ever assembled" --- 这句话出自于美国伯克利大学的 Joe Hellerstein 教授。云计算带来的改变在于,任何人能够在任何地方任何时间,获取其服务所需的计算资源。但是,由于计算资源毕竟有限,当存在大量的用户服务共享这些资源时,云服务商就需要考虑资源配额的问题。如果资源配额低于其用户服务所需,服务的响应性能就会降低,甚至达不到服务级别协议(Service Level Agreement,SLA)。另一方面,如果资源配额超过了其用户所需,服务的响应性能则有可能会大大地超过服务级别协议,而造成了资源的浪费。有一个值得注意的点是,我们这边所讨论的计算资源除了 CPU 的运算以外,也可以包含内存,硬盘,能源功耗,等等。更进一步地去思考,如果云服务商能够用预测的方法,来预估用户服务现在(或未来)的工作量和所需的资源,那资源配额这问题就能更好地被优化。近年来,资源配额成为 AIOps(Artificial Intelligence for IT Operations)关注的一个大方向。 我们可以先想想,为什么资源配额有可能被机器学习这一类的方法来解决。一般来说,从数据中心所采集到的历史数据来看,许多云服务的资源需求取决于用户的交互式请求(即云服务的系统负载),而用户请求有着规律。这规律主要是在时间上(比如工作日和周末的搜索引擎的关键字),但也可以是在空间上(比如不同城市居民的搜索引擎的关键字)。而机器学习能很好地帮助云服务商来学习并运用这些规律;我们从微软 Azure 的 Resource Central 来更深一步地讨论机。在现在 Azure 的框架下,用户购买资源是以虚拟机为一个部署单位。Resource Central 的作者通过收集 Azure 上的虚拟机部署的信息,周期性地学习虚拟机部署外在行为的规律(由于虚拟机的内部数据属于用户隐私),生成预测模型。模型的预测信息提供给 Azure 的资源管理器,来作为调度的依据。比如,Azure 
的资源管理器决定那几个虚拟机可以同时被部署在同一个物理机上,来最高限度地达到物理机的资源上限。甚至,如果有些虚拟机很大概率地不会使用已配额的资源,云服务商可以考虑”超卖“资源。 Resource Central 用不同的模型,来分别预测一个新部署的以下指标:***(1)*** 部署里全部虚拟机的平均 CPU 使用量 (Average CPU Utilization),***(2)*** 部署里全部虚拟机的 P95 CPU 使用量 (P95 CPU Utilization),***(3)*** 部署里虚拟机的最大数量(Deployment Size in Number of VMs),***(4)*** 部署里虚拟机的最多使用核数(Deployment Size in Number of CPU Cores),***(5)*** 部署的生命时长(Deployment Lifetime),和 ***(6)*** 部署的负载等级(Deployment Workload Class)。Resource Central 使用了随机森林(Random Forest)来预测前 2 个 CPU 类别的指标,和极端梯度提升树(Extreme Gradient Boosting Tree,XGBoost Tree)来预测后 4 个指标。虽然作者没有给出选择随机森林和极端梯度提升树的理由,但我们可以从认识这两种方法开始。第一,随机森林是一个包含多个决策树的分类器,而其输出的类别是由个别决策树输出结果中哪一个类别最多而定。由于随机森林里的每个树是基于训练数据的随机样本,随机森林通常比单个决策树更准确。第二,极端梯度提升树是对梯度提升算法(Gradient Boosting Decision Tree,GBDT)的改进,而后者是由梯度提升(Gradient Boosting)和提升树(Boosting Tree)演化而来。提升树利用多个弱学习器,来更好地学习一个训练数据集。弱学习器是串行迭代生成的,而构建提升树则是通过最小化每一步的弱学习器损失函数;基于这思想之上,GBDT 利用了决策树去拟合上一步损失函数的梯度。XGBoost 在 GBDT 的工程实现上做了大量的优化,比如支持决策树之外的基分类器。 由于每个指标的模型不一样,我们这边以 P95 CPU 使用量为一个例子,来讨论实验结果。Resource Central 把 CPU 使用量分成了 4 个档次:0-25%,25-50%,50-75%,75-100%。根据新部署的信息为模型输入(比如 Azure 用户账号,用户请求的虚拟机规格,用户请求的时间),来预测最有可能的区间。对于 P95 CPU,实验数据表示 Resource Central 能达到 81% 的准确率。在模拟环境下,Resource Central 能有效地决定那几个 VM 可以同时被部署在同一个物理机上,来最高限度地达到物理机的资源上限。 <br/> <center><img src="./img/2/resource_central.png" width="475"/></center> <center>图13-2-7. Resource Central 的架构,包括了 Offline 组件(来负责数据的处理工作)和 Client 组件(来负责外部系统与 Resource Central 的通信)。</center> <br/> Resource Central 的架构如上图所示,它包含线下(Offline)和 客户端(Client)两个组件。相比模型选取,Resource Central 的作者还指出一个更重要的痛点:数据的处理工作。这是由 Offline 组件负责。数据的处理工作包括了数据的提取,清理,聚合,特征生成,等等。客户端组件则被包装成一个 Dynamic Link Library(DLL)的动态链接库,用来进行模型预测。外部系统通过和客户端的 DLL 交互,来与 Resource Central 进行通信。 ## 小结与讨论 这个章节透过案例,来展示如何把系统的问题抽象成机器学习的问题。有趣的是,对于有些系统的问题,深度学习不是唯一的工具,而传统机器学习也是可以尝试的方法。在进入下一个章节前,读者可以思考落地模型的痛点和考虑要素。 ## 参考文献 1. Jasper Snoek, Oren Rippel, Kevin Swersky, Ryan Kiros, Nadathur Satish, Narayanan Sundaram, Md. Mostofa Ali Patwary, Prabhat Prabhat, and Ryan P. Adams. 2015. 
[*Scalable Bayesian Optimization Using Deep Neural Networks*](https://dl.acm.org/doi/10.5555/3045118.3045349). In Proceedings of the 32nd International Conference on International Conference on Machine Learning - Volume 37 (ICML'15). 2. Tianyin Xu, Long Jin, Xuepeng Fan, Yuanyuan Zhou, Shankar Pasupathy, and Rukma Talwadker. 2015. [*Hey, You Have Given Me Too Many Knobs!: Understanding and Dealing with Over-Designed Configuration in System Software*](https://doi.org/10.1145/2786805.2786852). In Proceedings of the 2015 10th Joint Meeting on Foundations of Software Engineering (ESEC/FSE '15). Association for Computing Machinery. 3. Francis Y. Yan, Hudson Ayers, Chenzhi Zhu, Sadjad Fouladi, James Hong, Keyi Zhang, Philip Levis, and Keith Winstein. 2020. [*Learning in situ: A Randomized Experiment in Video Streaming*](https://www.usenix.org/system/files/nsdi20-paper-yan.pdf). In Proceedings of the 17th USENIX Symposium on Networked Systems Design and Implementation (NSDI '20). USENIX Association. 4. Hongzi Mao, Ravi Netravali, and Mohammad Alizadeh. 2017. [*Neural Adaptive Video Streaming with Pensieve*](https://doi.org/10.1145/3098822.3098843). In Proceedings of the Conference of the ACM Special Interest Group on Data Communication (SIGCOMM '17). Association for Computing Machinery. 5. Dana Van Aken, Andrew Pavlo, Geoffrey J. Gordon, and Bohan Zhang. 2017. [*Automatic Database Management System Tuning Through Large-scale Machine Learning*](https://doi.org/10.1145/3035918.3064029). In Proceedings of the 2017 ACM International Conference on Management of Data (SIGMOD '17). Association for Computing Machinery. 6. Omid Alipourfard, Hongqiang Harry Liu, Jianshu Chen, Shivaram Venkataraman, Minlan Yu, and Ming Zhang. 2017. [*CherryPick: Adaptively Unearthing the Best Cloud Configurations for Big Data Analytics*](https://www.usenix.org/system/files/conference/nsdi17/nsdi17-alipourfard.pdf). 
In Proceedings of the 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI '17). USENIX Association. 7. Tim Kraska, Alex Beutel, Ed H. Chi, Jeffrey Dean, and Neoklis Polyzotis. 2018. [*The Case for Learned Index Structures*](https://doi.org/10.1145/3183713.3196909). In Proceedings of the 2018 International Conference on Management of Data (SIGMOD '18). Association for Computing Machinery. 8. Eli Cortez, Anand Bonde, Alexandre Muzio, Mark Russinovich, Marcus Fontoura, and Ricardo Bianchini. 2017. [*Resource Central: Understanding and Predicting Workloads for Improved Resource Management in Large Cloud Platforms*](https://doi.org/10.1145/3132747.3132772). In Proceedings of the 26th Symposium on Operating Systems Principles (SOSP '17). Association for Computing Machinery. 9. Zhao Lucis Li, Chieh-Jan Mike Liang, Wenjia He, Lianjie Zhu, Wenjun Dai, Jin Jiang, and Guangzhong Sun. 2018. [*Metis: Robustly Optimizing Tail Latencies of Cloud Systems*](https://www.usenix.org/system/files/conference/atc18/atc18-li-zhao.pdf). In Proceedings of the 2018 USENIX Conference on Usenix Annual Technical Conference (ATC '18). USENIX Association. 10. Jialin Ding, Umar Farooq Minhas, Jia Yu, Chi Wang, Jaeyoung Do, Yinan Li, Hantian Zhang, Badrish Chandramouli, Johannes Gehrke, Donald Kossmann, David Lomet, and Tim Kraska. 2020. [*ALEX: An Updatable Adaptive Learned Index*](https://doi.org/10.1145/3318464.3389711). In Proceedings of the 2020 ACM SIGMOD International Conference on Management of Data (SIGMOD '20). Association for Computing Machinery. 11. Azalia Mirhoseini, Anna Goldie, Mustafa Yazgan, Joe Wenjie Jiang, Ebrahim Songhori, Shen Wang, Young-Joon Lee, Eric Johnson, Omkar Pathak, Azade Nazi, Jiwoo Pak, Andy Tong, Kavya Srinivasa, William Hang, Emre Tuncer, Quoc V. Le, James Laudon, Richard Ho, Roger Carpenter, and Jeff Dean. 2021. [*A Graph Placement Methodology for Fast Chip Design*](https://doi.org/10.1038/s41586-021-03544-w). In Nature (594). 12. 
Baotong Lu, Jialin Ding, Eric Lo, Umar Farooq Minhas, and Tianzheng Wang. 2021. [*APEX: A High-Performance Learned Index on Persistent Memory*](https://doi.org/10.14778/3494124.3494141). Proc. VLDB Endow.
AI-System/Textbook/第13章-人工智能优化计算机系统/13.2-学习增强系统的应用.md/0
{ "file_path": "AI-System/Textbook/第13章-人工智能优化计算机系统/13.2-学习增强系统的应用.md", "repo_id": "AI-System", "token_count": 23692 }
14
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 深度学习框架基础(Introduction to deep learning frameworks) # 简介 通过第二章对常见神经网络层的介绍,不难发现深度神经网络算法具有高度模块化的特点。算法研究者在为具体应用设计神经网络模型时,能够通过沿着宽度和深度方向堆叠组合基本处理层的方式,构建起任意复杂的神经网络模型。然而,扩大神经网络规模对算力需求也相应提升,就需要使用并行计算机进行加速以提高训练效率。编程并行计算机对开发者有很高的要求,往往需要掌握较为底层的并行编程模型来显示地控制并行任务划分、任务间的数据传输和通信这些制约性能的关键因素。为了简化编程并行计算机的复杂性,深度学习框架通过建立起对深度学习软件栈的分层抽象,力图在可编程性和系统性能之间达到平衡,让软件栈中的不同角色:算法研究者,系统工程师,或是硬件工程师,不仅能够在各自的专业领域独立于其他抽象层进行开发,同时又能与软件栈中其他层接口无缝集成。 深度学习框架的设计选择经历了几次重要的发展和变化。这些选择受前沿深度学习算法和硬件加速器发展的共同推动,也反应了深度学习系统设计在可编程性,灵活性和性能之间的不断权衡。 # 内容概览 本章将围绕以下内容展开: - [3.1 基于数据流图的深度学习框架](3.1-基于数据流图的深度学习框架.md) - [3.2 神经网络计算中的控制流](3.2-神经网络计算中的控制流.md)
AI-System/Textbook/第3章-深度学习框架基础/3-前言.md/0
{ "file_path": "AI-System/Textbook/第3章-深度学习框架基础/3-前言.md", "repo_id": "AI-System", "token_count": 1275 }
15
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 9.2 自动机器学习系统与工具 - [9.2 自动机器学习系统与工具](#92-自动机器学习系统与工具) - [9.2.1 自动机器学习系统与工具概述](#921-自动机器学习系统与工具概述) - [9.2.2 探索式训练过程](#922-探索式训练过程) - [9.2.3 自动机器学习编程范式](#923-自动机器学习编程范式) - [9.2.4 自动机器学习系统优化前沿](#924-自动机器学习系统优化前沿) - [9.2.5 自动机器学习工具概述与实例分析](#925-自动机器学习工具概述与实例分析) - [9.2.6 自动机器学习系统练习](#926-自动机器学习系统练习) - [小结与讨论](#小结与讨论) - [参考文献](#参考文献) ## 9.2.1 自动机器学习系统与工具概述 自动机器学习工具能够使机器学习模型的设计和调优变得简单而可扩展,使机器学习广泛赋能各个行业、各类场景。在传统的MLops中,机器学习模型的设计环节占比较少,更多是围绕着运维方面做模型的部署、监控、升级。随着机器学习模型特别是深度学习模型越来越多的部署在各类应用中,模型的定制化需求也变得越来越高,比如不同的场景对模型精度、模型大小、推理延迟有不同的要求。这使得模型的设计与部署越来越紧密的结合在一起,成为未来MLops中的重要一环。因此,自动机器学习工具会变得越来越重要,成为模型设计与部署的重要一环,甚至成为核心环节。 一个自动机器学习工具的设计需要兼顾很多方面,如易用性(easy-to-use)、灵活性(flexibility)、扩展性(scalability)和有效性(effectiveness)。 - 易用性是指用户容易上手,对工具的学习曲线较为平缓。易用性不仅是文档的问题,更多的是工具中各级用户接口的设计。在自动机器学习中,易用性主要关注两类使用场景。一是用户已经有了初步的模型,如何利用工具快速调优模型至满足应用需求。二是用户没有模型,仅有应用需求和数据,工具如何协助用户获得满足需求的模型。 - 灵活性是指用户不仅可以利用工具提供的算法快速得到效果不错的模型,还可以通过细粒度的配置和定制模型结构、搜索策略等,进一步提升模型的效果。一个灵活性较高的工具通常模块化程度较高,提供多层用户接口,支持各个层级与组件的定制与组合。 - 扩展性是指一个模型调优任务可以灵活调整使用的计算资源的多少,比如,可以使用单台机器,也可以使用成百上千的机器。自动机器学习任务通常具有很高的可并行性,通过运行相互独立的试验,探索整个优化空间。自动机器学习工具为了提供可扩展性通常需要通过集成集群管理平台来完成,或者是直接在某个集群管理平台原生支持自动机器学习的功能。 - 有效性是指工具提供的算法和系统优化可以加速搜索模型的进程。算法的有效性是工具有效性的核心,有效的搜索算法可以通过迁移学习、多保真(multi-fidelity)技术、搜索空间的优化、解空间的精准建模等技术,来提升搜索的有效性。另一方面,系统优化也是加速搜索的一种手段,通过使用更少的计算资源获得相同的搜索结果。 
目前,自动机器学习工具已大量涌现,在功能上主要分为两类:面向端到端的(end-to-end)自动模型生成工具和面向模型开发及流程定制的半自动模型生成工具。前者在使用接口上更加简单,通常提供`tool.fit(training_data)`和`tool.predict(one_sample)`这类用户接口。其中,`fit`是根据用户提供的目标训练数据,得到最优的机器学习模型;`predict`是使用搜索得到的最优模型做模型推理(inference)。提供这类接口的自动机器学习工具有[Auto-Sklearn](https://automl.github.io/auto-sklearn/master/)、[AutoKeras](https://github.com/keras-team/autokeras)。面向模型开发和流程定制的半自动模型生成相比于前者提供了更大的灵活性。它通常提供丰富的算法库,更加灵活的配置接口。用户可以灵活选择使用什么样的搜索算法、设计什么样的搜索空间、配置什么样的模型优化目标等定制化需求。这类工具包括[NNI](https://github.com/microsoft/nni)、[Ray](https://github.com/ray-project/ray)。其实,上述的两类自动机器学习工具并不冲突,可以看作一种互补,即前者可以架构于后者之上。在面向模型开发和流程定制的自动机器学习工具之上可以构建各种端到端的自动机器学习应用。 另一方面,在自动机器学习工具的使用和部署方式上又可以分为两类:以工具库(library)的形式和以服务的形式(或称之为云原生的形式)。以工具库的形式通常安装非常简便,只需一行安装命令,相对轻量,可以安装在不同的系统和环境中。另一类是以服务的形式,这类工具直接以服务的形式部署在云上,并关联的解决数据存储、实验管理、可视化分析等一系列功能。使用体验更好,但是相应的会让用户花费更高的成本。这两种工具的提供形式也不冲突,结合软件即服务(Software as a Service,SaaS)的思想,一个好的以工具库的形式设计的自动机器学习工具也可以以服务的形式部署在云上。 机器学习模型充当的是助力和赋能的角色,因此应用场景众多。自动机器学习工具的设计需要权衡通用性和定制化。过于通用会使工具离实际应用太远,无法很好支持自动机器学习任务。过于定制化,又使自动机器学习应用很难扩展,沦为某种形式的软件外包。这就要求在自动机器学习工具的设计过程中,提取共性,抽象出通用模块,设计具有可扩展性的接口,合理拆分系统层级。 自动机器学习工具有两大部分构成。一部分是算法,即各类超参数搜索算法,神经网络结构搜索算法,模型压缩算法等等在模型设计、调优和部署过程中涉及到的各种算法和流程。另一部分是平台和系统,用以支持算法的高效以及分布式运行的。 算法部分已经在上一节做了介绍。接下来,从平台和系统方面,分别从统一的运行模式,下一代编程范式,和前沿的系统优化,介绍自动机器学习系统中的几个关键组成。最后使用自动机器学习工具NNI做实例分析,并提供实验强化对本节的理解。 ## 9.2.2 探索式训练过程 ***探索式训练是前沿论文共同影射出来的方向*** 在机器学习模型的设计、调优和部署的过程中,试错(trial-and-error)是机器学习模型开发者的统一行为模式。机器学习模型很难通过一次性的模型设计就满足应用的要求,而通常是经过反复的迭代和调优。自动机器学习是对开发者调优模型的一种模拟,因此它也遵循试错的行为模式,就像图9-1-1中展示的那样。 对每一个具体的机器学习应用,所需要的机器学习模型会隐式的存在一个设计和调优的空间,模型开发者对模型的设计和调优实际是在这个空间中进行的探索。在这个空间中探索最优模型被称之为[**探索式训练**](https://www.usenix.org/system/files/osdi20-zhang_quanlu.pdf)。 探索式训练的本质是将传统的单模型训练转变为在一个模型空间(即搜索空间)中模型调优和训练相结合的过程,也即将模型调优囊括到模型训练的过程中。图9-2-1展示了神经网络结构搜索中几种探索式训练的例子。如图9-2-1(a)所示,在模型设计过程中,将一层中的算子替换成其他类型算子是一种常见模型修改方式,修改模型是为了探索出表现更优的模型。另外,模型中算子之间的连接也会影响模型的表现,所以改变连接也是常见的调优方式,比如增加跳线(skip 
connection)。另外开发人员也会基于某种规则对模型做变化,比如在模型中所有Conv2d算子的后面添加BatchNorm算子,或者将BatchNorm统一替换成LayerNorm。如图9-2-1(b)所示,开发人员会尝试将某种模型结构做适当泛化。比如对一个特定的Inception cell的多分支结构做泛化,尝试不同的分支数,以及在不同分支上尝试不同的算子。如图9-2-1(c)所示,开发人员也会设计若干规则,在一个基础模型上不断的应用这些规则使模型逐渐变大。 <center> <img src="./img/9-2-1-exploratory-training.png"/></center> <center>图9-2-1. 探索式训练的例子</center> 通常这些对模型的细粒度的改变和调优是模型开发者难以根据经验准确知道调优结果(即模型表现会变好还是变坏),需要实际将模型训练后得到其表现。因此将这类调优和模型训练结合到一起可以让开发人员更专注于模型骨架的设计,而将这种细粒度调优留给自动化搜索。 ## 9.2.3 自动机器学习编程范式 ***前沿的编程接口*** 探索式训练使模型开发者在模型设计过程中可以含糊得表达一个大致的模型设计思路,而让自动机器学习通过搜索的方式丰富模型的各个细节。如何设计一个服务于这种需求的编程范式是一个挑战,它需要既可以简单地表达模型空间又具有较强的表达能力。 从图9-2-1以及模型开发者调优模型的行为模式,可以看出模型的设计和调优本质上是一个不断改变(mutate)模型的过程。因此表达一个模型空间等价于表达模型的一个可变化范围。例如,神经网络模型中的一层中初始使用的是Conv3x3算子,开发人员可以通过模型变化将其替换成Conv5x5,或者替换成Maxpool,来验证模型效果是否有提升。这里模型的变化是由变化方式(例如算子的替换操作)和变化空间(例如替换成Conv3x3、Conv5x5或者Maxpool)构成的。例如图9-2-1(b)中的模型变化,其变化方式是增加删除分支和替换分支上的算子,变化空间是所有候选的分支数量和所有的候选算子。 模型形变(mutation)就是表达模型空间的一种编程抽象。它将深度学习模型看作一个计算流图(data flow graph),并提供两类原语(primitive)来操作模型完成模型形变:一类是图操作原语,即图的增删查改,用以灵活的变化模型;另一类是表达变化空间的原语,例如API *choice()*是从多个候选中选择一个。一个完整的模型空间是由一个初始模型(base model)和模型形变构成的。图9-2-2是用模型形变描述出的模型空间的一个示例,图9-2-2(a)是初始模型。该示例中的模型空间是将模型第三层“model/maxpool”替换成一个类似inception cell的层。这个层可以有2到5个分支,每个分支上的算子可以从3个候选算子(即Conv、DepthwiseConv或者Maxpool)中选1个。图9-2-2(b)用图示描述了该模型形变。图9-2-2(c)是利用上述编程范式实现的一个模型空间的代码。其中,“InceptionMutator”是模型变化的伪代码,它在 “mutate”函数中确定一个具体的分支数量(第9行),对于每个分支从候选算子中选择一个并连接到模型中。最后使用“apply_mutator”将这个模型变化应用到模型的目标位置上,即“model/maxpool”。 <center> <img src="./img/9-2-2-modelspace.png"/></center> <center>图9-2-2. 
一个模型空间的示例</center> 通过模型形变表述模型空间的这种新编程范式可以将模型空间的表达、搜索算法和系统优化三者有机地结合起来,并得到下面三个特性。 - 任意的模型空间均可由该编程范式灵活表达; - 通过该编程接口表达的模型空间可以被搜索算法正确解析,大大提升搜索算法的可复用性。 - 系统层面,模型在探索式训练的过程中做的任何变化都可以被精准定位,使得模型和模型之间的关系变得非常清晰,从而打开了跨模型优化的机会。 ***更加易用的编程接口*** 模型形变是表达模型空间的核心抽象,虽然具有很大的灵活性,但是在编程的易用性上稍有欠缺。因此在该编程范式之上可以提供更加简洁易用的语法糖(syntactic sugar)。在神经网络结构搜索中,有三个API较为常用,可以构建出大部分模型空间。它们分别是“LayerChoice”,“InputChoice”和“ValueChoice”。“LayerChoice”是创建模型中的一层,该层的算子是从一系列候选算子中选择一个。“InputChoice”是创建连接,允许从一系列候选张量(tensor)中选择一个或者多个张量作为模型中一个层的输入。“ValueChoice”是从多个候选数值中选择一个,比如用来选择dropout rate的大小。这些API可以直接在编写PyTorch或者Tensorflow的模型代码中使用,将编写的模型变为一个模型空间。 ## 9.2.4 自动机器学习系统优化前沿 ***自动机器学习系统架构*** 自动机器学习系统一般由四部分构成,如图9-2-3所示。模型空间分析器将用户编写的模型空间解析成系统可以理解和优化的中间表达(intermediate representation)。然后模型生成器可以进行模型生成。生成什么模型是由探索式训练控制器决定。模型控制器中会运行一个搜索算法,来决定要探索到具体哪一个或者一些模型。生成的模型可以由跨模型优化器做一系列系统上的优化加速。最后优化后的模型被放到模型训练平台(如Kubernetes)上训练。训练的结果(如模型精度)会反馈给探索式训练控制器用于指导之后的模型生成。 <center> <img src="./img/9-2-3-architecture.png"/></center> <center>图9-2-3. 自动机器学习系统架构</center> ***前沿的自动机器学习优化技术*** 探索式训练区别于以往的模型训练在于它的目标不再是将一个单一的模型训练好,而是快速发现模型空间中表现好的模型并将其训练好。这就带来了新的系统优化机会。常见的主要有三类优化,一类是利用模型之间的相似性加速多个模型的训练,第二类是加速探索式训练过程,第三类是针对某些探索式训练做定制化的优化。下面依次介绍。 ***多模型训练加速*** 探索式训练有两个特点:一次可以生成多个模型进行探索,生成的模型之间有很大的相似性。这给跨模型优化带来了很大的优化空间。(i) 由于生成的模型之间相似性很大,这些模型可以共用模型中相同的部分,比如使用相同的数据集,相同的数据预处理逻辑,甚至是相同的子模型。图9-2-4是一个这样的例子。因此,这些相同的计算可以通过去重变成一份。通过对整合后的模型做合理的切分并放置到不同的计算设备上,可以达到总体更快的训练速度。 <center> <img src="./img/9-2-4-cse.png"/></center> <center>图9-2-4. 
跨模型优化示例</center> (ii)上面介绍的优化更多的是去重那些没有训练参数的模型部分,对于有训练参数的模型部分,由于每一个模型需要训练自己的参数,因此不能做去重。这时可以做模型之间的融合(fuse)。前面章节介绍过模型优化中的算子融合,即相邻的两个算子可以融合成一个算子从而提升运行效率。而在自动机器学习系统里的算子融合通常表示不同模型中对应位置的相同算子可以融合在一起,从而达到提升设备利用率(utilization)的效果。这种优化对于小模型的探索式训练非常有效,通过模型之间的融合可以充分利用计算设备上的计算资源。这种优化在[Retiarii](https://www.usenix.org/system/files/osdi20-zhang_quanlu.pdf)和[HFTA](https://proceedings.mlsys.org/paper/2021/file/a97da629b098b75c294dffdc3e463904-Paper.pdf)两个研究工作中均有被提出。 ***探索式训练过程的加速*** 探索式训练过程也可以被有效加速,通过对模型训练做合理的资源分配和调度。这里介绍两种优化技术。一种是时分复用的模型训练。这种方式会分配少量计算资源给一个新生成的模型,用于初步估计这个模型的效果。如果表现较好则保留继续参与下一轮的时分复用,如果表现不好则直接剔除。这样可以在使用相同量计算资源的情况下,尝试更多的模型,从而快速发现表现好的那些模型。这种优化方法最早在[Gandiva](https://www.usenix.org/conference/osdi18/presentation/xiao)中被提出。另一种优化技术是通过评估正在训练的模型的表现,动态调整分配给它们的计算资源,表现好的模型会被分配更多资源,而表现较差的模型被分配到的资源会相对较少。它与时分复用的区别在于,它调整资源的维度不在时间维度上,而是在资源的数量维度上,即通过调整模型可以使用的计算资源量来提升探索式训练的效率。早停算法可以看作是这种资源调度的一种极端情况。 ***针对具体场景优化*** 探索式训练过程有很多算法,例如9.1节中介绍的多试验搜索和单发搜索。单发搜索在行为上非常特殊,是将候选的模型结构合并成一个超网络,每一个minibatch只激活该超模型中的一个子模型。这种超模型的分布式训练需要有特殊设计的模型并行(model parallelism)策略。典型的方法有混合并行([mixed parallelism](https://www.usenix.org/system/files/osdi20-zhang_quanlu.pdf))和[NASPipe](https://drive.google.com/file/d/1gUCAVK0UjN86kuaZbbdjFnsYNDiSgxXK/view)。 ## 9.2.5 自动机器学习工具概述与实例分析 ***自动机器学习工具概述*** 目前市面上的自动机器学习工具种类繁多,侧重点各有不同。自动机器学习工具在围绕着三个核心能力发展和演进。 - 模型自动设计与调优的算法。有些自动机器学习工具仅提供一种模型设计和调优算法,如Auto-Sklearn,TPOT,H2O AutoML,AutoKeras。这类工具通常提供十分简洁的用户接口,如`tool.fit`,`tool.predict`。由于不同的机器学习任务(如图像识别,文本分类)通常需要不同的模型设计空间和搜索方式,这类工具会分任务做定制化模型搜索。Auto-Sklearn和TPOT主要针对scikit-learn中的传统机器学习算法,AutoKeras则主要针对深度学习模型。另外一些自动机器学习工具通过模块化设计提供一系列主流的模型搜索算法(如9.1节中介绍的算法),由用户根据自己的需求选择合适的搜索算法应用到自己的任务中,如NNI,Ray。这类工具的定位偏重于辅助模型开发者设计和调优模型。另外,一些工具,如Ray,Weights&Biases,MLflow,在算法上主要支持的是超参数搜索算法。而且Weights&Biases和MLflow虽然有超参数搜索的能力,但是他们在工具的定位上是机器学习训练任务的管理工具。 - 
分布式模型搜索与训练的能力。模型搜索通常需要较多的计算资源。一些自动机器学习工具可以连接到不同类型的计算资源上,比如远程的计算服务器,Kubernetes集群,云计算服务。如Ray和NNI都可以连接不同的计算资源,其中NNI是用统一的接口将不同的计算资源封装起来,令模型搜索无差别的使用不同类型的计算资源(后面会详细介绍)。Ray设计了一种结合了调度能力的远程过程调用(即`ray.remote`),将计算分发到不同的计算节点上。Weights&Biases也具有类似的功能,将试验分发到用户提供的机器上运行。auto-sklearn、AutoKeras没有提供分布式的能力。有些自动机器学习工具与集群管理工具或者云服务紧耦合,如Kubeflow(Kubeflow是在kubernetes上构建的,针对机器学习任务运行和部署的工具)中原生支持的自动机器学习工具Katib。在Katib中,整个超参数搜索的配置,如需要搜索的超参数及取值范围、搜索并行度,直接写到了机器学习训练任务的配置文件中。无论上述哪种方式的分布式能力,只需合理的封装,都可以在云上以SaaS的形式提供自动机器学习能力。 - 编程接口与用户交互。现有的自动机器学习工具虽然提供的编程接口各不相同,但是总体可以分为两类。一类是用户提供任务数据,工具直接返回搜索到的好的模型,即上述的Auto-Sklearn,TPOT等。另一类是用户需要自己编写或者指定模型,指定搜索空间及合适的搜索算法,来完成搜索过程。用户编写和指定这些内容的方式也有多种,一些工具是通过配置文件描述搜索空间,有些是在Python代码里以dict直接描述,还有些为了描述搜索空间的简便,支持将超参数的可行域直接在模型使用该超参数的对应位置描述出来,如NNI中的ValueChoice。在试验代码的编程方式也有多种,一类是试验代码作为一个独立脚本,通过命令行参数或者工具提供的API和搜索过程交互。另一类是将试验代码写作一个函数,其输入参数是超参数的取值,返回值是该组超参数取值下的表现。前者在试验的隔离性上更优一点,后者在试验代码编写上(特别是较简单的试验代码)更友好一点。用户交互方面有两种模式:命令行和图形化。图形化是机器学习模型开发的有力工具,仅仅针对深度学习模型训练的可视化和管理工具已经涌现出很多,如TensorBoard,Weights&Biases,MLflow。在自动机器学习工具中可视化也是重要的组成部分,每个试验的训练信息,试验之间的对比,搜索过程的演进,搜索出的模型的可视化,以及实验管理等。可以将自动机器学习的可视化视为传统深度学习模型训练可视化的增强。 ***NNI*** NNI是轻量级自动机器学习工具,其中主要包括超参数搜索、网络结构搜索和模型压缩。这三种类型的任务有一个共同的特点,即不断尝试新的候选模型结构或者模型配置。每一个候选需要评估其表现。因此,一个自动机器学习工具需要具备的基本功能是机器学习模型评估任务的分发。NNI提供了向不同训练平台分发任务的能力,应用不同搜索算法的能力,以及友好的用户编程和交互接口。 <center> <img src="./img/9-2-5-nni.png"/></center> <center>图9-2-5. 自动机器学习工具NNI的基础架构</center> 图9-2-5展示了NNI的基础架构。首先,任务分发能力是由图中的Training service提供,每一个训练平台都可以通过Training service供自动机器学习任务使用。Tuner/Assesor是搜索算法,NNI提供的编程接口可以支持复杂的搜索算法实现。图左侧是用户编程接口和实验管理接口。 ## 9.2.6 自动机器学习系统练习 ***实验目的*** 通过试用 NNI 了解自动机器学习,熟悉自动机器学习中的基本概念 ***实验环境*** * Ubuntu * Python==3.7.6 * NNI==1.8 * PyTorch==1.5.0 ***实验原理*** 在本实验中,我们将处理 CIFAR-10 图片分类数据集。基于一个表现较差的基准模型和训练方法,我们将使用自动机器学习的方法进行模型选择和优化、超参数调优,从而得到一个准确率较高的模型。 ***实验内容*** *实验流程图* ![](/imgs/Lab8-flow.png "Lab8 flow chat") *具体步骤* 1. 熟悉 PyTorch 和 CIFAR-10 图像分类数据集。可以先阅读教程:https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html 2. 
熟悉 NNI 的基本使用。阅读教程:https://nni.readthedocs.io/en/latest/Tutorial/QuickStart.html 3. 运行CIFAR-10代码并观察训练结果。在实验目录下,找到 `hpo/main.py`,运行程序,记录模型预测的准确率。 4. 手动参数调优。通过修改命令行参数来手动调整超参,以提升模型预测准确率。记录调整后的超参名称和数值,记录最终准确率。 **注:** main.py 暴露大量的命令行选项,可以进行调整,命令行选项可以直接从代码中查找,或通过 `python main.py -h` 查看。例如,`--model`(默认是 resnet18),`--initial_lr`(默认是 0.1),`--epochs`(默认是 300)等等。一种简单的方法是通过手工的方法调整参数(例如 `python main.py --model resnet50 --initial_lr 0.01`)然后根据结果再做调整。 5. 使用 NNI 加速参数调优过程。 1. 参考NNI的基本使用教程,安装NNI(建议在Linux系统中安装NNI并运行实验)。 2. 参照NNI教程运行 `mnist-pytorch` 样例程序(程序地址: https://github.com/microsoft/nni/tree/master/examples/trials/mnist-pytorch ),测试安装正确性,并熟悉NNI的基本使用方法。 3. 使用NNI自动调参功能调试hpo目录下CIFAR-10程序的超参。创建 `search_space.json` 文件并编写搜索空间(即每个参数的范围是什么),创建 `config.yml` 文件配置实验(可以视资源量决定搜索空间的大小和并行量),运行程序。在 NNI 的 WebUI 查看超参搜索结果,记录结果截图,并记录得出最好准确率的超参配置。 6. (可选)上一步中进行的模型选择,是在若干个前人发现的比较好的模型中选择一个。此外,还可以用自动机器学习的方法选择模型,即网络架构搜索(NAS)。请参考nas目录下 `model.py`,采用 DARTS 的搜索空间,选择合适的 Trainer,进行搜索训练。记录搜索结果架构,并用此模型重新训练,记录最终训练准确率。 **注:** 搜索完成后得到的准确率并不是实际准确率,需要使用搜索到的模型重新进行单独的训练。具体请参考 NNI NAS 文档:https://nni.readthedocs.io/en/latest/nas.html ***实验报告*** *实验环境* |||| |--------|--------------|--------------------------| |硬件环境 | CPU(vCPU数目)| &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | | | GPU(型号,数目) | | | 软件环境 | OS版本 | | | | 深度学习框架<br>python包名称及版本 | | | | CUDA版本 | | |||| *实验结果* 1. 记录不同调参方式下,cifar10程序训练结果的准确率。 |||| |---------|-----------------|------------| | 调参方式 | &nbsp; &nbsp; 超参名称和设置值 &nbsp; &nbsp; | &nbsp; &nbsp; 模型准确率 &nbsp; &nbsp; | | &nbsp; <br /> &nbsp; 原始代码 &nbsp; <br /> &nbsp; ||| | &nbsp; <br /> &nbsp; 手动调参 &nbsp; <br /> &nbsp; ||| | &nbsp; <br /> &nbsp; NNI自动调参 &nbsp; <br /> &nbsp; ||| | &nbsp; <br /> &nbsp; 网络架构搜索 <br />&nbsp; &nbsp; (可选) <br /> &nbsp; ||| |||| 2. 提交使用NNI自动调参方式,对 main.py、search_space.json、config.yml 改动的代码文件或截图。 <br /> <br /> 3. 提交使用NNI自动调参方式,Web UI上的结果截图。 <br /> <br /> 4. 
(可选)提交 NAS 的搜索空间、搜索方法和搜索结果(得到的架构和最终准确率)。 <br /> <br /> ***参考代码与资料*** *自动调参* 代码位置:`Lab8/hpo` 参考答案:`Lab8/hpo-answer` *网络架构搜索(NAS)* 代码位置:`Lab8/nas` *参考资料* * Cifar10简介:https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html * NNI文档:https://nni.readthedocs.io/en/latest/ * NNI mnist-pytorch代码:https://github.com/microsoft/nni/tree/v1.9/examples/trials/mnist-pytorch * NNI NAS 文档:https://nni.readthedocs.io/en/latest/nas.html * DARTS GitHub:https://github.com/quark0/darts ## 小结与讨论 自动机器学习系统和工具是机器学习模型在落地过程中不可或缺的重要组成部分。像机器学习模型的不断进步一样,自动机器学习系统也在不断摸索演进。从模型的训练和调优过程中,提取标准化流程并以系统和工具的形式提高模型开发人员的效率。流程被标准化之后,其中的模块就可以更加通用和高效。随着机器学习模型的日渐成熟,自动机器学习工具也逐渐演进得更加强大。从一个给开发人员的开发工具,到更加端到端的模型生成,可以自动化的部分越来越多。另外在模型的整个生命周期上,自动化也越来越多的涉入,比如在模型的部署和服务过程中,也有越来越多的组件被自动化,逐渐演进成整个MLOps。 虽然自动机器学习系统和工具演进迅速,但是目前的这类系统和工具还有很大的局限性。由于深度学习框架并没有收敛(PyTorch,TensorFlow),这给提供一个通用的自动机器学习工具带来很大困难,一些相对高阶的优化方式很难提供稳定鲁棒的支持。另外,端到端的模型自动化生成仍然具有很大挑战,特别是考虑到更加多样的硬件环境。克服这些局限性可以很大程度上促进机器学习模型的广泛部署。 ## 参考文献 <div id="xx-1"></div> 1. [Zhang, Quanlu, Zhenhua Han, Fan Yang, Yuge Zhang, Zhe Liu, Mao Yang, and Lidong Zhou. "Retiarii: A Deep Learning {Exploratory-Training} Framework." In 14th USENIX Symposium on Operating Systems Design and Implementation (OSDI 20), pp. 919-936. 2020.](https://www.usenix.org/system/files/osdi20-zhang_quanlu.pdf) 2. [Wang, Shang, Peiming Yang, Yuxuan Zheng, Xin Li, and Gennady Pekhimenko. "Horizontally Fused Training Array: An Effective Hardware Utilization Squeezer for Training Novel Deep Learning Models." Proceedings of Machine Learning and Systems 3 (2021): 599-623.](https://proceedings.mlsys.org/paper/2021/file/a97da629b098b75c294dffdc3e463904-Paper.pdf) 3. [Xiao, Wencong, Romil Bhardwaj, Ramachandran Ramjee, Muthian Sivathanu, Nipun Kwatra, Zhenhua Han, Pratyush Patel et al. "Gandiva: Introspective cluster scheduling for deep learning." In 13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18), pp. 595-610. 
2018.](https://www.usenix.org/conference/osdi18/presentation/xiao) 4. [Zhao, Shixiong, Fanxin Li, Xusheng Chen, Tianxiang Shen, Li Chen, Sen Wang, Nicholas Zhang, Cheng Li, and Heming Cui. "NASPipe: high performance and reproducible pipeline parallel supernet training via causal synchronous parallelism." In Proceedings of the 27th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, pp. 374-387. 2022.](https://drive.google.com/file/d/1gUCAVK0UjN86kuaZbbdjFnsYNDiSgxXK/view)
AI-System/Textbook/第9章-自动化机器学习系统/9.2-自动化机器学习系统与工具.md/0
{ "file_path": "AI-System/Textbook/第9章-自动化机器学习系统/9.2-自动化机器学习系统与工具.md", "repo_id": "AI-System", "token_count": 21348 }
16
# 人工智能系统(System for AI)课程目录 ## Basic Lectures - Lecture1-2: System for AI-1&2-Introduction and System Perspective [ [PDF](./SystemforAI-1-2-Introduction%20and%20System%20Perspective.pdf) ] - Lecture3: System for AI-3-Computation frameworks for DNN [ [PDF](./SystemforAI-3-Framework.pdf) ] - Lecture4: System for AI-4-Computer architecture for Matrix computation [ [PDF](./SystemforAI-4-Computer%20architecture%20for%20Matrix%20computation.pdf) ] - Lecture5: System for AI-5-Distributed training algorithms [ [PDF](./SystemforAI-5-DistributedAlgo.pdf) ] - Lecture6: System for AI-6-Distributed training systems [ [PDF](./SystemforAI-6-DistributedSys.pdf) ] - Lecture7: System for AI-7-Scheduling and resource management system [ [PDF](./SystemforAI-7-Platform.pdf) ] - Lecture8: System for AI-8-Inference systems [ [PDF](./SystemforAI-8-Inference.pdf) ] ## Advanced Lectures - Lecture9: System for AI-9-Compilation and Optimization [ [PDF](./SystemforAI-9-Compilation%20and%20Optimization.pdf) ] - Lecture10: System for AI-10-Efficiency via Compression and Sparsity [ [PDF](./SystemforAI-10-Efficiency%20via%20Compression%20and%20Sparsity.pdf) ] - Lecture11: System for AI-11-AutoML systems [ [PDF](./SystemforAI-11-AutoML.pdf) ] - Lecture12: System for AI-12-Reinforcement learning systems [ [PDF](./SystemforAI-12-System%20for%20Reinforcement%20Learning.pdf) ] - Lecture13: System for AI-13-Security and Privacy [ [PDF](./SystemforAI-13-Security%20and%20Privacy.pdf) ] - Lecture14: System for AI-14-AI for systems [ [PDF](./SystemforAI-14-AI%20for%20Systems.pdf) ]
AI-System/docs/README.md/0
{ "file_path": "AI-System/docs/README.md", "repo_id": "AI-System", "token_count": 586 }
17