Search is not available for this dataset
identifier
stringlengths
1
155
parameters
stringlengths
2
6.09k
docstring
stringlengths
11
63.4k
docstring_summary
stringlengths
0
63.4k
function
stringlengths
29
99.8k
function_tokens
sequence
start_point
sequence
end_point
sequence
language
stringclasses
1 value
docstring_language
stringlengths
2
7
docstring_language_predictions
stringlengths
18
23
is_langid_reliable
stringclasses
2 values
OutboundTransportManager.stop
(self, wait: bool = True)
Stop all running transports.
Stop all running transports.
async def stop(self, wait: bool = True):
    """Shut down all running outbound transports.

    Args:
        wait: When True, allow all queued delivery tasks to finish;
            when False, give pending tasks a zero-second grace period.
    """
    active = self._process_task
    if active and not active.done():
        active.cancel()
    await self.task_queue.complete(None if wait else 0)
    for running in self.running_transports.values():
        await running.stop()
    self.running_transports = {}
[ "async", "def", "stop", "(", "self", ",", "wait", ":", "bool", "=", "True", ")", ":", "if", "self", ".", "_process_task", "and", "not", "self", ".", "_process_task", ".", "done", "(", ")", ":", "self", ".", "_process_task", ".", "cancel", "(", ")", "await", "self", ".", "task_queue", ".", "complete", "(", "None", "if", "wait", "else", "0", ")", "for", "transport", "in", "self", ".", "running_transports", ".", "values", "(", ")", ":", "await", "transport", ".", "stop", "(", ")", "self", ".", "running_transports", "=", "{", "}" ]
[ 175, 4 ]
[ 182, 36 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.get_registered_transport_for_scheme
(self, scheme: str)
Find the registered transport ID for a given scheme.
Find the registered transport ID for a given scheme.
def get_registered_transport_for_scheme(self, scheme: str) -> str:
    """Find the registered transport ID for a given scheme.

    Scans the registered transports in insertion order and returns the
    first transport ID whose schemes include the given one; implicitly
    returns None when no registered transport handles the scheme.
    """
    for transport_id, registered in self.registered_transports.items():
        if scheme in registered.schemes:
            return transport_id
[ "def", "get_registered_transport_for_scheme", "(", "self", ",", "scheme", ":", "str", ")", "->", "str", ":", "try", ":", "return", "next", "(", "transport_id", "for", "transport_id", ",", "transport", "in", "self", ".", "registered_transports", ".", "items", "(", ")", "if", "scheme", "in", "transport", ".", "schemes", ")", "except", "StopIteration", ":", "pass" ]
[ 184, 4 ]
[ 193, 16 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.get_running_transport_for_scheme
(self, scheme: str)
Find the running transport ID for a given scheme.
Find the running transport ID for a given scheme.
def get_running_transport_for_scheme(self, scheme: str) -> str:
    """Find the running transport ID for a given scheme.

    Scans the running transports in insertion order and returns the
    first transport ID whose schemes include the given one; implicitly
    returns None when no running transport handles the scheme.
    """
    for transport_id, running in self.running_transports.items():
        if scheme in running.schemes:
            return transport_id
[ "def", "get_running_transport_for_scheme", "(", "self", ",", "scheme", ":", "str", ")", "->", "str", ":", "try", ":", "return", "next", "(", "transport_id", "for", "transport_id", ",", "transport", "in", "self", ".", "running_transports", ".", "items", "(", ")", "if", "scheme", "in", "transport", ".", "schemes", ")", "except", "StopIteration", ":", "pass" ]
[ 195, 4 ]
[ 204, 16 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.get_running_transport_for_endpoint
(self, endpoint: str)
Find the running transport ID to use for a given endpoint.
Find the running transport ID to use for a given endpoint.
def get_running_transport_for_endpoint(self, endpoint: str):
    """Find the running transport ID to use for a given endpoint.

    Args:
        endpoint: The URI whose scheme selects the transport

    Raises:
        OutboundDeliveryError: if the endpoint has no scheme, or no
            running transport is registered for that scheme
    """
    # The URI scheme determines which transport driver can deliver
    scheme = urlparse(endpoint).scheme
    if not scheme:
        raise OutboundDeliveryError(
            f"The uri '{endpoint}' does not specify a scheme"
        )
    transport_id = self.get_running_transport_for_scheme(scheme)
    if not transport_id:
        raise OutboundDeliveryError(
            f"No transport driver exists to handle scheme '{scheme}'"
        )
    return transport_id
[ "def", "get_running_transport_for_endpoint", "(", "self", ",", "endpoint", ":", "str", ")", ":", "# Grab the scheme from the uri", "scheme", "=", "urlparse", "(", "endpoint", ")", ".", "scheme", "if", "scheme", "==", "\"\"", ":", "raise", "OutboundDeliveryError", "(", "f\"The uri '{endpoint}' does not specify a scheme\"", ")", "# Look up transport that is registered to handle this scheme", "transport_id", "=", "self", ".", "get_running_transport_for_scheme", "(", "scheme", ")", "if", "not", "transport_id", ":", "raise", "OutboundDeliveryError", "(", "f\"No transport driver exists to handle scheme '{scheme}'\"", ")", "return", "transport_id" ]
[ 206, 4 ]
[ 221, 27 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.get_transport_instance
(self, transport_id: str)
Get an instance of a running transport by ID.
Get an instance of a running transport by ID.
def get_transport_instance(self, transport_id: str) -> BaseOutboundTransport:
    """Get an instance of a running transport by ID.

    Raises KeyError (from the mapping lookup) when the ID is not running.
    """
    running = self.running_transports
    return running[transport_id]
[ "def", "get_transport_instance", "(", "self", ",", "transport_id", ":", "str", ")", "->", "BaseOutboundTransport", ":", "return", "self", ".", "running_transports", "[", "transport_id", "]" ]
[ 223, 4 ]
[ 225, 52 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.enqueue_message
(self, context: InjectionContext, outbound: OutboundMessage)
Add an outbound message to the queue. Args: context: The context of the request outbound: The outbound message to deliver
Add an outbound message to the queue.
def enqueue_message(self, context: InjectionContext, outbound: OutboundMessage):
    """
    Add an outbound message to the queue.

    Args:
        context: The context of the request
        outbound: The outbound message to deliver

    Raises:
        OutboundDeliveryError: if no running transport supports any of
            the message's targets
    """
    if outbound.target:
        targets = [outbound.target]
    else:
        targets = outbound.target_list or []

    # Pick the first target whose endpoint a running transport can serve
    transport_id = None
    for target in targets:
        try:
            transport_id = self.get_running_transport_for_endpoint(target.endpoint)
        except OutboundDeliveryError:
            pass
        if transport_id:
            break
    if not transport_id:
        raise OutboundDeliveryError("No supported transport for outbound message")

    queued = QueuedOutboundMessage(context, outbound, target, transport_id)
    queued.retries = 5
    self.outbound_new.append(queued)
    self.process_queued()
[ "def", "enqueue_message", "(", "self", ",", "context", ":", "InjectionContext", ",", "outbound", ":", "OutboundMessage", ")", ":", "targets", "=", "[", "outbound", ".", "target", "]", "if", "outbound", ".", "target", "else", "(", "outbound", ".", "target_list", "or", "[", "]", ")", "transport_id", "=", "None", "for", "target", "in", "targets", ":", "endpoint", "=", "target", ".", "endpoint", "try", ":", "transport_id", "=", "self", ".", "get_running_transport_for_endpoint", "(", "endpoint", ")", "except", "OutboundDeliveryError", ":", "pass", "if", "transport_id", ":", "break", "if", "not", "transport_id", ":", "raise", "OutboundDeliveryError", "(", "\"No supported transport for outbound message\"", ")", "queued", "=", "QueuedOutboundMessage", "(", "context", ",", "outbound", ",", "target", ",", "transport_id", ")", "queued", ".", "retries", "=", "5", "self", ".", "outbound_new", ".", "append", "(", "queued", ")", "self", ".", "process_queued", "(", ")" ]
[ 227, 4 ]
[ 251, 29 ]
python
en
['en', 'error', 'th']
False
OutboundTransportManager.enqueue_webhook
( self, topic: str, payload: dict, endpoint: str, retries: int = None )
Add a webhook to the queue. Args: topic: The webhook topic payload: The webhook payload endpoint: The webhook endpoint retries: Override the number of retries Raises: OutboundDeliveryError: if the associated transport is not running
Add a webhook to the queue.
def enqueue_webhook(
    self, topic: str, payload: dict, endpoint: str, retries: int = None
):
    """
    Add a webhook to the queue.

    Args:
        topic: The webhook topic
        payload: The webhook payload
        endpoint: The webhook endpoint
        retries: Override the number of retries (default 5)

    Raises:
        OutboundDeliveryError: if the associated transport is not running
    """
    transport_id = self.get_running_transport_for_endpoint(endpoint)
    queued = QueuedOutboundMessage(None, None, None, transport_id)
    queued.endpoint = f"{endpoint}/topic/{topic}/"
    queued.payload = json.dumps(payload)
    # Webhooks skip the encoding step: the JSON payload is ready to send
    queued.state = QueuedOutboundMessage.STATE_PENDING
    queued.retries = retries if retries is not None else 5
    self.outbound_new.append(queued)
    self.process_queued()
[ "def", "enqueue_webhook", "(", "self", ",", "topic", ":", "str", ",", "payload", ":", "dict", ",", "endpoint", ":", "str", ",", "retries", ":", "int", "=", "None", ")", ":", "transport_id", "=", "self", ".", "get_running_transport_for_endpoint", "(", "endpoint", ")", "queued", "=", "QueuedOutboundMessage", "(", "None", ",", "None", ",", "None", ",", "transport_id", ")", "queued", ".", "endpoint", "=", "f\"{endpoint}/topic/{topic}/\"", "queued", ".", "payload", "=", "json", ".", "dumps", "(", "payload", ")", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_PENDING", "queued", ".", "retries", "=", "5", "if", "retries", "is", "None", "else", "retries", "self", ".", "outbound_new", ".", "append", "(", "queued", ")", "self", ".", "process_queued", "(", ")" ]
[ 253, 4 ]
[ 276, 29 ]
python
en
['en', 'error', 'th']
False
OutboundTransportManager.process_queued
(self)
Start the process to deliver queued messages if necessary. Returns: the current queue processing task or None
Start the process to deliver queued messages if necessary.
def process_queued(self) -> asyncio.Task:
    """
    Start the process to deliver queued messages if necessary.

    Returns: the current queue processing task or None
    """
    current = self._process_task
    if current and not current.done():
        # a processing loop is already active; wake it to re-scan queues
        self.outbound_event.set()
    elif self.outbound_new or self.outbound_buffer:
        self._process_task = self.loop.create_task(self._process_loop())
        self._process_task.add_done_callback(self._process_done)
    return self._process_task
[ "def", "process_queued", "(", "self", ")", "->", "asyncio", ".", "Task", ":", "if", "self", ".", "_process_task", "and", "not", "self", ".", "_process_task", ".", "done", "(", ")", ":", "self", ".", "outbound_event", ".", "set", "(", ")", "elif", "self", ".", "outbound_new", "or", "self", ".", "outbound_buffer", ":", "self", ".", "_process_task", "=", "self", ".", "loop", ".", "create_task", "(", "self", ".", "_process_loop", "(", ")", ")", "self", ".", "_process_task", ".", "add_done_callback", "(", "lambda", "task", ":", "self", ".", "_process_done", "(", "task", ")", ")", "return", "self", ".", "_process_task" ]
[ 278, 4 ]
[ 290, 33 ]
python
en
['en', 'error', 'th']
False
OutboundTransportManager._process_done
(self, task: asyncio.Task)
Handle completion of the drain process.
Handle completion of the drain process.
def _process_done(self, task: asyncio.Task):
    """Handle completion of the drain process.

    Logs any exception raised by the processing loop and clears the
    stored task reference once it has finished.
    """
    exc_info = task_exc_info(task)
    if exc_info:
        LOGGER.exception(
            "Exception in outbound queue processing:", exc_info=exc_info
        )
    finished = self._process_task
    if finished and finished.done():
        self._process_task = None
[ "def", "_process_done", "(", "self", ",", "task", ":", "asyncio", ".", "Task", ")", ":", "exc_info", "=", "task_exc_info", "(", "task", ")", "if", "exc_info", ":", "LOGGER", ".", "exception", "(", "\"Exception in outbound queue processing:\"", ",", "exc_info", "=", "exc_info", ")", "if", "self", ".", "_process_task", "and", "self", ".", "_process_task", ".", "done", "(", ")", ":", "self", ".", "_process_task", "=", "None" ]
[ 292, 4 ]
[ 300, 37 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager._process_loop
(self)
Continually kick off encoding and delivery on outbound messages.
Continually kick off encoding and delivery on outbound messages.
async def _process_loop(self): """Continually kick off encoding and delivery on outbound messages.""" # Note: this method should not call async methods apart from # waiting for the updated event, to avoid yielding to other queue methods while True: self.outbound_event.clear() loop_time = time.perf_counter() upd_buffer = [] for queued in self.outbound_buffer: if queued.state == QueuedOutboundMessage.STATE_DONE: if queued.error: LOGGER.exception( "Outbound message could not be delivered to %s", queued.endpoint, exc_info=queued.error, ) if self.handle_not_delivered: self.handle_not_delivered(queued.context, queued.message) continue # remove from buffer deliver = False if queued.state == QueuedOutboundMessage.STATE_PENDING: deliver = True elif queued.state == QueuedOutboundMessage.STATE_RETRY: if queued.retry_at < loop_time: queued.retry_at = None deliver = True if deliver: queued.state = QueuedOutboundMessage.STATE_DELIVER self.deliver_queued_message(queued) upd_buffer.append(queued) new_pending = 0 new_messages = self.outbound_new self.outbound_new = [] for queued in new_messages: if queued.state == QueuedOutboundMessage.STATE_NEW: if queued.message and queued.message.enc_payload: queued.payload = queued.message.enc_payload queued.state = QueuedOutboundMessage.STATE_PENDING new_pending += 1 else: queued.state = QueuedOutboundMessage.STATE_ENCODE self.encode_queued_message(queued) else: new_pending += 1 upd_buffer.append(queued) self.outbound_buffer = upd_buffer if self.outbound_buffer: if not new_pending: await self.outbound_event.wait() else: break
[ "async", "def", "_process_loop", "(", "self", ")", ":", "# Note: this method should not call async methods apart from", "# waiting for the updated event, to avoid yielding to other queue methods", "while", "True", ":", "self", ".", "outbound_event", ".", "clear", "(", ")", "loop_time", "=", "time", ".", "perf_counter", "(", ")", "upd_buffer", "=", "[", "]", "for", "queued", "in", "self", ".", "outbound_buffer", ":", "if", "queued", ".", "state", "==", "QueuedOutboundMessage", ".", "STATE_DONE", ":", "if", "queued", ".", "error", ":", "LOGGER", ".", "exception", "(", "\"Outbound message could not be delivered to %s\"", ",", "queued", ".", "endpoint", ",", "exc_info", "=", "queued", ".", "error", ",", ")", "if", "self", ".", "handle_not_delivered", ":", "self", ".", "handle_not_delivered", "(", "queued", ".", "context", ",", "queued", ".", "message", ")", "continue", "# remove from buffer", "deliver", "=", "False", "if", "queued", ".", "state", "==", "QueuedOutboundMessage", ".", "STATE_PENDING", ":", "deliver", "=", "True", "elif", "queued", ".", "state", "==", "QueuedOutboundMessage", ".", "STATE_RETRY", ":", "if", "queued", ".", "retry_at", "<", "loop_time", ":", "queued", ".", "retry_at", "=", "None", "deliver", "=", "True", "if", "deliver", ":", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_DELIVER", "self", ".", "deliver_queued_message", "(", "queued", ")", "upd_buffer", ".", "append", "(", "queued", ")", "new_pending", "=", "0", "new_messages", "=", "self", ".", "outbound_new", "self", ".", "outbound_new", "=", "[", "]", "for", "queued", "in", "new_messages", ":", "if", "queued", ".", "state", "==", "QueuedOutboundMessage", ".", "STATE_NEW", ":", "if", "queued", ".", "message", "and", "queued", ".", "message", ".", "enc_payload", ":", "queued", ".", "payload", "=", "queued", ".", "message", ".", "enc_payload", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_PENDING", "new_pending", "+=", "1", "else", ":", "queued", ".", 
"state", "=", "QueuedOutboundMessage", ".", "STATE_ENCODE", "self", ".", "encode_queued_message", "(", "queued", ")", "else", ":", "new_pending", "+=", "1", "upd_buffer", ".", "append", "(", "queued", ")", "self", ".", "outbound_buffer", "=", "upd_buffer", "if", "self", ".", "outbound_buffer", ":", "if", "not", "new_pending", ":", "await", "self", ".", "outbound_event", ".", "wait", "(", ")", "else", ":", "break" ]
[ 302, 4 ]
[ 362, 21 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.encode_queued_message
(self, queued: QueuedOutboundMessage)
Kick off encoding of a queued message.
Kick off encoding of a queued message.
def encode_queued_message(self, queued: QueuedOutboundMessage) -> asyncio.Task:
    """Kick off encoding of a queued message.

    Returns: the task performing the encoding
    """
    def _on_complete(completed):
        self.finished_encode(queued, completed)

    queued.task = self.task_queue.run(self.perform_encode(queued), _on_complete)
    return queued.task
[ "def", "encode_queued_message", "(", "self", ",", "queued", ":", "QueuedOutboundMessage", ")", "->", "asyncio", ".", "Task", ":", "queued", ".", "task", "=", "self", ".", "task_queue", ".", "run", "(", "self", ".", "perform_encode", "(", "queued", ")", ",", "lambda", "completed", ":", "self", ".", "finished_encode", "(", "queued", ",", "completed", ")", ",", ")", "return", "queued", ".", "task" ]
[ 364, 4 ]
[ 370, 26 ]
python
en
['en', 'ca', 'en']
True
OutboundTransportManager.perform_encode
(self, queued: QueuedOutboundMessage)
Perform message encoding.
Perform message encoding.
async def perform_encode(self, queued: QueuedOutboundMessage):
    """Perform message encoding.

    Uses the transport's own wire format when it defines one, otherwise
    injects the base wire format from the message context.
    """
    transport = self.get_transport_instance(queued.transport_id)
    wire_format = transport.wire_format
    if not wire_format:
        wire_format = await queued.context.inject(BaseWireFormat)
    target = queued.target
    queued.payload = await wire_format.encode_message(
        queued.context,
        queued.message.payload,
        target.recipient_keys,
        target.routing_keys,
        target.sender_key,
    )
[ "async", "def", "perform_encode", "(", "self", ",", "queued", ":", "QueuedOutboundMessage", ")", ":", "transport", "=", "self", ".", "get_transport_instance", "(", "queued", ".", "transport_id", ")", "wire_format", "=", "transport", ".", "wire_format", "or", "await", "queued", ".", "context", ".", "inject", "(", "BaseWireFormat", ")", "queued", ".", "payload", "=", "await", "wire_format", ".", "encode_message", "(", "queued", ".", "context", ",", "queued", ".", "message", ".", "payload", ",", "queued", ".", "target", ".", "recipient_keys", ",", "queued", ".", "target", ".", "routing_keys", ",", "queued", ".", "target", ".", "sender_key", ",", ")" ]
[ 372, 4 ]
[ 384, 9 ]
python
en
['es', 'en', 'en']
True
OutboundTransportManager.finished_encode
(self, queued: QueuedOutboundMessage, completed: CompletedTask)
Handle completion of queued message encoding.
Handle completion of queued message encoding.
def finished_encode(self, queued: QueuedOutboundMessage, completed: CompletedTask):
    """Handle completion of queued message encoding.

    Moves the message to PENDING on success or DONE (with the error
    recorded) on failure, then restarts queue processing.
    """
    if not completed.exc_info:
        queued.state = QueuedOutboundMessage.STATE_PENDING
    else:
        queued.error = completed.exc_info
        queued.state = QueuedOutboundMessage.STATE_DONE
    queued.task = None
    self.process_queued()
[ "def", "finished_encode", "(", "self", ",", "queued", ":", "QueuedOutboundMessage", ",", "completed", ":", "CompletedTask", ")", ":", "if", "completed", ".", "exc_info", ":", "queued", ".", "error", "=", "completed", ".", "exc_info", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_DONE", "else", ":", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_PENDING", "queued", ".", "task", "=", "None", "self", ".", "process_queued", "(", ")" ]
[ 386, 4 ]
[ 394, 29 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.deliver_queued_message
(self, queued: QueuedOutboundMessage)
Kick off delivery of a queued message.
Kick off delivery of a queued message.
def deliver_queued_message(self, queued: QueuedOutboundMessage) -> asyncio.Task:
    """Kick off delivery of a queued message.

    Returns: the task performing the delivery
    """
    transport = self.get_transport_instance(queued.transport_id)

    def _on_complete(completed):
        self.finished_deliver(queued, completed)

    queued.task = self.task_queue.run(
        transport.handle_message(queued.payload, queued.endpoint), _on_complete
    )
    return queued.task
[ "def", "deliver_queued_message", "(", "self", ",", "queued", ":", "QueuedOutboundMessage", ")", "->", "asyncio", ".", "Task", ":", "transport", "=", "self", ".", "get_transport_instance", "(", "queued", ".", "transport_id", ")", "queued", ".", "task", "=", "self", ".", "task_queue", ".", "run", "(", "transport", ".", "handle_message", "(", "queued", ".", "payload", ",", "queued", ".", "endpoint", ")", ",", "lambda", "completed", ":", "self", ".", "finished_deliver", "(", "queued", ",", "completed", ")", ",", ")", "return", "queued", ".", "task" ]
[ 396, 4 ]
[ 403, 26 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.finished_deliver
(self, queued: QueuedOutboundMessage, completed: CompletedTask)
Handle completion of queued message delivery.
Handle completion of queued message delivery.
def finished_deliver(self, queued: QueuedOutboundMessage, completed: CompletedTask):
    """Handle completion of queued message delivery.

    On success the message is marked DONE; on failure it is scheduled
    for a retry 10 seconds later while retries remain, otherwise it is
    marked DONE with the error recorded.
    """
    if not completed.exc_info:
        queued.error = None
        queued.state = QueuedOutboundMessage.STATE_DONE
    else:
        queued.error = completed.exc_info
        LOGGER.exception(
            "Outbound message could not be delivered", exc_info=queued.error,
        )
        if queued.retries:
            queued.retries -= 1
            queued.state = QueuedOutboundMessage.STATE_RETRY
            # back off for 10 seconds before the next attempt
            queued.retry_at = time.perf_counter() + 10
        else:
            queued.state = QueuedOutboundMessage.STATE_DONE
    queued.task = None
    self.process_queued()
[ "def", "finished_deliver", "(", "self", ",", "queued", ":", "QueuedOutboundMessage", ",", "completed", ":", "CompletedTask", ")", ":", "if", "completed", ".", "exc_info", ":", "queued", ".", "error", "=", "completed", ".", "exc_info", "LOGGER", ".", "exception", "(", "\"Outbound message could not be delivered\"", ",", "exc_info", "=", "queued", ".", "error", ",", ")", "if", "queued", ".", "retries", ":", "queued", ".", "retries", "-=", "1", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_RETRY", "queued", ".", "retry_at", "=", "time", ".", "perf_counter", "(", ")", "+", "10", "else", ":", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_DONE", "else", ":", "queued", ".", "error", "=", "None", "queued", ".", "state", "=", "QueuedOutboundMessage", ".", "STATE_DONE", "queued", ".", "task", "=", "None", "self", ".", "process_queued", "(", ")" ]
[ 405, 4 ]
[ 423, 29 ]
python
en
['en', 'en', 'en']
True
OutboundTransportManager.flush
(self)
Wait for any queued messages to be delivered.
Wait for any queued messages to be delivered.
async def flush(self):
    """Wait for any queued messages to be delivered.

    Starts (or pokes) the processing loop and awaits it if one exists.
    """
    pending = self.process_queued()
    if pending:
        await pending
[ "async", "def", "flush", "(", "self", ")", ":", "proc_task", "=", "self", ".", "process_queued", "(", ")", "if", "proc_task", ":", "await", "proc_task" ]
[ 425, 4 ]
[ 429, 27 ]
python
en
['en', 'en', 'en']
True
Perform.__init__
(self, *, name: str = None, params: Mapping[str, str] = None, **kwargs)
Initialize a Perform object. Args: name: The name of the menu option params: Input parameter values
Initialize a Perform object.
def __init__(self, *, name: str = None, params: Mapping[str, str] = None, **kwargs):
    """
    Initialize a Perform object.

    Args:
        name: The name of the menu option
        params: Input parameter values
        kwargs: Additional keyword arguments passed to the base class
    """
    super(Perform, self).__init__(**kwargs)
    self.params = params
    self.name = name
[ "def", "__init__", "(", "self", ",", "*", ",", "name", ":", "str", "=", "None", ",", "params", ":", "Mapping", "[", "str", ",", "str", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Perform", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "self", ".", "name", "=", "name", "self", ".", "params", "=", "params" ]
[ 23, 4 ]
[ 33, 28 ]
python
en
['en', 'error', 'th']
False
L2Norm.__init__
(self, n_dims, scale=20., eps=1e-10)
L2 normalization layer. Args: n_dims (int): Number of dimensions to be normalized scale (float, optional): Defaults to 20.. eps (float, optional): Used to avoid division by zero. Defaults to 1e-10.
L2 normalization layer.
def __init__(self, n_dims, scale=20., eps=1e-10):
    """L2 normalization layer.

    Args:
        n_dims (int): Number of dimensions to be normalized
        scale (float, optional): Defaults to 20..
        eps (float, optional): Used to avoid division by zero.
            Defaults to 1e-10.
    """
    super(L2Norm, self).__init__()
    self.scale = scale
    self.eps = eps
    self.n_dims = n_dims
    # learnable per-channel rescaling weight
    self.weight = nn.Parameter(torch.Tensor(self.n_dims))
[ "def", "__init__", "(", "self", ",", "n_dims", ",", "scale", "=", "20.", ",", "eps", "=", "1e-10", ")", ":", "super", "(", "L2Norm", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "n_dims", "=", "n_dims", "self", ".", "weight", "=", "nn", ".", "Parameter", "(", "torch", ".", "Tensor", "(", "self", ".", "n_dims", ")", ")", "self", ".", "eps", "=", "eps", "self", ".", "scale", "=", "scale" ]
[ 147, 4 ]
[ 160, 26 ]
python
en
['es', 'it', 'en']
False
L2Norm.forward
(self, x)
Forward function.
Forward function.
def forward(self, x):
    """Forward function.

    L2-normalizes `x` along dim 1 and rescales by the per-channel weight;
    assumes a 4-D input (N, C, H, W) — the weight is broadcast over dims
    0, 2 and 3.
    """
    # normalization runs in FP32 for stability under FP16 training
    inputs = x.float()
    denom = inputs.pow(2).sum(1, keepdim=True).sqrt() + self.eps
    gains = self.weight[None, :, None, None].float().expand_as(inputs)
    return (gains * inputs / denom).type_as(x)
[ "def", "forward", "(", "self", ",", "x", ")", ":", "# normalization layer convert to FP32 in FP16 training", "x_float", "=", "x", ".", "float", "(", ")", "norm", "=", "x_float", ".", "pow", "(", "2", ")", ".", "sum", "(", "1", ",", "keepdim", "=", "True", ")", ".", "sqrt", "(", ")", "+", "self", ".", "eps", "return", "(", "self", ".", "weight", "[", "None", ",", ":", ",", "None", ",", "None", "]", ".", "float", "(", ")", ".", "expand_as", "(", "x_float", ")", "*", "x_float", "/", "norm", ")", ".", "type_as", "(", "x", ")" ]
[ 162, 4 ]
[ 168, 42 ]
python
en
['en', 'cy', 'en']
False
AbstractDistillTransformerAgentMixin._get_teacher_model
(self)
Return the teacher model. This logic is needed because the teacher model may be wrapped by torch.nn.parallel.DistributedDataParallel.
Return the teacher model.
def _get_teacher_model(self) -> nn.Module: """ Return the teacher model. This logic is needed because the teacher model may be wrapped by torch.nn.parallel.DistributedDataParallel. """ if hasattr(self.teacher_model, 'module'): return self.teacher_model.module else: return self.teacher_model
[ "def", "_get_teacher_model", "(", "self", ")", "->", "nn", ".", "Module", ":", "if", "hasattr", "(", "self", ".", "teacher_model", ",", "'module'", ")", ":", "return", "self", ".", "teacher_model", ".", "module", "else", ":", "return", "self", ".", "teacher_model" ]
[ 240, 4 ]
[ 250, 37 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._register_series_of_hooks
( self, model: nn.Module, module_map: Dict[str, Type[nn.Module]] )
Register hooks in modules of the model, given the mapping of module types. `module_map` is a dict whose keys are module-type names and whose values are module types. For each module type, during each forward pass of `model`, all outputs of modules of that type will be saved to `hooks[module_type].outputs`.
Register hooks in modules of the model, given the mapping of module types.
def _register_series_of_hooks(
    self, model: nn.Module, module_map: Dict[str, Type[nn.Module]]
) -> Dict[str, OutputRecorder]:
    """
    Register hooks in modules of the model, given the mapping of module types.

    `module_map` is a dict whose keys are module-type names and whose values
    are module types. For each module type, during each forward pass of
    `model`, all outputs of modules of that type will be saved to
    `hooks[module_type].outputs`.
    """
    hooks = {}
    for type_name, module_type in module_map.items():
        recorder = OutputRecorder()
        hooks[type_name] = recorder
        # attach the same recorder to every submodule of this type
        for submodule in model.modules():
            if isinstance(submodule, module_type):
                submodule.register_forward_hook(recorder)
    return hooks
[ "def", "_register_series_of_hooks", "(", "self", ",", "model", ":", "nn", ".", "Module", ",", "module_map", ":", "Dict", "[", "str", ",", "Type", "[", "nn", ".", "Module", "]", "]", ")", "->", "Dict", "[", "str", ",", "OutputRecorder", "]", ":", "hooks", "=", "{", "}", "for", "module_name", ",", "module_type", "in", "module_map", ".", "items", "(", ")", ":", "hooks", "[", "module_name", "]", "=", "OutputRecorder", "(", ")", "for", "module", "in", "model", ".", "modules", "(", ")", ":", "if", "isinstance", "(", "module", ",", "module_type", ")", ":", "module", ".", "register_forward_hook", "(", "hooks", "[", "module_name", "]", ")", "return", "hooks" ]
[ 252, 4 ]
[ 268, 20 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin.compute_loss
(self, batch, return_output=False)
Return the loss. This will likely call self._perform_forward_passes().
Return the loss.
def compute_loss(self, batch, return_output=False):
    """
    Return the loss.

    This will likely call self._perform_forward_passes().
    """
    # No implementation here: the body is only this docstring, so calling
    # it directly returns None — presumably subclasses override this hook.
[ "def", "compute_loss", "(", "self", ",", "batch", ",", "return_output", "=", "False", ")", ":" ]
[ 271, 4 ]
[ 276, 11 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._perform_forward_passes
(self, batch: Batch)
Perform forward passes through the student and teacher and pass back outputs.
Perform forward passes through the student and teacher and pass back outputs.
def _perform_forward_passes(self, batch: Batch) -> ForwardPassOutputs: """ Perform forward passes through the student and teacher and pass back outputs. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods mask = batch.label_vec != self.NULL_IDX self._clear_hook_outputs(self.hooks) # Forward pass through teacher model with torch.no_grad(): teacher_scores, teacher_preds, teacher_enc_states = self.teacher_model( *self._model_input(batch), ys=batch.label_vec ) teacher_enc_output, context_mask = teacher_enc_states # Forward pass through student model task_loss, student_output = super().compute_loss(batch, return_output=True) student_scores, student_preds, student_enc_states = student_output student_enc_output, _ = student_enc_states # Compile all outputs given the hooks teacher_embedding_outputs = self._extract_embedding_outputs( hooks=self.hooks['teacher'] ) student_embedding_outputs = self._extract_embedding_outputs( hooks=self.hooks['student'] ) teacher_hidden_states = self._extract_hidden_states( hooks=self.hooks['teacher'], num_enc_layers=self.teacher_num_enc_layers, num_dec_layers=self.teacher_num_dec_layers, ) student_hidden_states = self._extract_hidden_states( hooks=self.hooks['student'], num_enc_layers=self.student_num_enc_layers, num_dec_layers=self.student_num_dec_layers, ) teacher_attention_matrices = self._extract_attention_matrices( hooks=self.hooks['teacher'], num_enc_layers=self.teacher_num_enc_layers, num_dec_layers=self.teacher_num_dec_layers, ) student_attention_matrices = self._extract_attention_matrices( hooks=self.hooks['student'], num_enc_layers=self.student_num_enc_layers, num_dec_layers=self.student_num_dec_layers, ) self._clear_hook_outputs(self.hooks) tokens_per_example = mask.sum(dim=-1) num_tokens = mask.sum() context_tokens_per_example = context_mask.sum(dim=-1) num_context_tokens = context_mask.sum() # If needed, perform further manipulation of the mask tensor mask = self._manipulate_mask( mask=mask, 
student_scores=student_scores, batch=batch ) # Record teacher accuracy teacher_acc = ((student_preds == teacher_preds) * mask).sum(dim=-1) self.record_local_metric( 'teacher_acc', AverageMetric.many(teacher_acc, tokens_per_example) ) return ForwardPassOutputs( mask=mask, tokens_per_example=tokens_per_example, num_tokens=num_tokens, context_mask=context_mask, context_tokens_per_example=context_tokens_per_example, num_context_tokens=num_context_tokens, task_loss=task_loss, teacher_scores=teacher_scores, teacher_enc_output=teacher_enc_output, teacher_embedding_outputs=teacher_embedding_outputs, teacher_hidden_states=teacher_hidden_states, teacher_attention_matrices=teacher_attention_matrices, student_output=student_output, student_scores=student_scores, student_enc_output=student_enc_output, student_embedding_outputs=student_embedding_outputs, student_hidden_states=student_hidden_states, student_attention_matrices=student_attention_matrices, )
[ "def", "_perform_forward_passes", "(", "self", ",", "batch", ":", "Batch", ")", "->", "ForwardPassOutputs", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "mask", "=", "batch", ".", "label_vec", "!=", "self", ".", "NULL_IDX", "self", ".", "_clear_hook_outputs", "(", "self", ".", "hooks", ")", "# Forward pass through teacher model", "with", "torch", ".", "no_grad", "(", ")", ":", "teacher_scores", ",", "teacher_preds", ",", "teacher_enc_states", "=", "self", ".", "teacher_model", "(", "*", "self", ".", "_model_input", "(", "batch", ")", ",", "ys", "=", "batch", ".", "label_vec", ")", "teacher_enc_output", ",", "context_mask", "=", "teacher_enc_states", "# Forward pass through student model", "task_loss", ",", "student_output", "=", "super", "(", ")", ".", "compute_loss", "(", "batch", ",", "return_output", "=", "True", ")", "student_scores", ",", "student_preds", ",", "student_enc_states", "=", "student_output", "student_enc_output", ",", "_", "=", "student_enc_states", "# Compile all outputs given the hooks", "teacher_embedding_outputs", "=", "self", ".", "_extract_embedding_outputs", "(", "hooks", "=", "self", ".", "hooks", "[", "'teacher'", "]", ")", "student_embedding_outputs", "=", "self", ".", "_extract_embedding_outputs", "(", "hooks", "=", "self", ".", "hooks", "[", "'student'", "]", ")", "teacher_hidden_states", "=", "self", ".", "_extract_hidden_states", "(", "hooks", "=", "self", ".", "hooks", "[", "'teacher'", "]", ",", "num_enc_layers", "=", "self", ".", "teacher_num_enc_layers", ",", "num_dec_layers", "=", "self", ".", "teacher_num_dec_layers", ",", ")", "student_hidden_states", "=", "self", ".", "_extract_hidden_states", "(", "hooks", "=", "self", ".", "hooks", "[", "'student'", "]", ",", "num_enc_layers", "=", "self", ".", "student_num_enc_layers", ",", "num_dec_layers", "=", "self", ".", "student_num_dec_layers", ",", ")", "teacher_attention_matrices", "=", "self", ".", 
"_extract_attention_matrices", "(", "hooks", "=", "self", ".", "hooks", "[", "'teacher'", "]", ",", "num_enc_layers", "=", "self", ".", "teacher_num_enc_layers", ",", "num_dec_layers", "=", "self", ".", "teacher_num_dec_layers", ",", ")", "student_attention_matrices", "=", "self", ".", "_extract_attention_matrices", "(", "hooks", "=", "self", ".", "hooks", "[", "'student'", "]", ",", "num_enc_layers", "=", "self", ".", "student_num_enc_layers", ",", "num_dec_layers", "=", "self", ".", "student_num_dec_layers", ",", ")", "self", ".", "_clear_hook_outputs", "(", "self", ".", "hooks", ")", "tokens_per_example", "=", "mask", ".", "sum", "(", "dim", "=", "-", "1", ")", "num_tokens", "=", "mask", ".", "sum", "(", ")", "context_tokens_per_example", "=", "context_mask", ".", "sum", "(", "dim", "=", "-", "1", ")", "num_context_tokens", "=", "context_mask", ".", "sum", "(", ")", "# If needed, perform further manipulation of the mask tensor", "mask", "=", "self", ".", "_manipulate_mask", "(", "mask", "=", "mask", ",", "student_scores", "=", "student_scores", ",", "batch", "=", "batch", ")", "# Record teacher accuracy", "teacher_acc", "=", "(", "(", "student_preds", "==", "teacher_preds", ")", "*", "mask", ")", ".", "sum", "(", "dim", "=", "-", "1", ")", "self", ".", "record_local_metric", "(", "'teacher_acc'", ",", "AverageMetric", ".", "many", "(", "teacher_acc", ",", "tokens_per_example", ")", ")", "return", "ForwardPassOutputs", "(", "mask", "=", "mask", ",", "tokens_per_example", "=", "tokens_per_example", ",", "num_tokens", "=", "num_tokens", ",", "context_mask", "=", "context_mask", ",", "context_tokens_per_example", "=", "context_tokens_per_example", ",", "num_context_tokens", "=", "num_context_tokens", ",", "task_loss", "=", "task_loss", ",", "teacher_scores", "=", "teacher_scores", ",", "teacher_enc_output", "=", "teacher_enc_output", ",", "teacher_embedding_outputs", "=", "teacher_embedding_outputs", ",", "teacher_hidden_states", "=", "teacher_hidden_states", ",", 
"teacher_attention_matrices", "=", "teacher_attention_matrices", ",", "student_output", "=", "student_output", ",", "student_scores", "=", "student_scores", ",", "student_enc_output", "=", "student_enc_output", ",", "student_embedding_outputs", "=", "student_embedding_outputs", ",", "student_hidden_states", "=", "student_hidden_states", ",", "student_attention_matrices", "=", "student_attention_matrices", ",", ")" ]
[ 278, 4 ]
[ 366, 9 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._manipulate_mask
( self, mask: torch.BoolTensor, student_scores: torch.Tensor, batch: Batch )
If necessary, perform further manipulations of the mask. Needed for BART-based student models to add in an extra start token.
If necessary, perform further manipulations of the mask.
def _manipulate_mask( self, mask: torch.BoolTensor, student_scores: torch.Tensor, batch: Batch ) -> torch.BoolTensor: """ If necessary, perform further manipulations of the mask. Needed for BART-based student models to add in an extra start token. """ if hasattr(super(), '_manipulate_mask'): # Defer to any agent-specific method for manipulating the mask return super()._manipulate_mask( mask=mask, student_scores=student_scores, batch=batch ) else: return mask
[ "def", "_manipulate_mask", "(", "self", ",", "mask", ":", "torch", ".", "BoolTensor", ",", "student_scores", ":", "torch", ".", "Tensor", ",", "batch", ":", "Batch", ")", "->", "torch", ".", "BoolTensor", ":", "if", "hasattr", "(", "super", "(", ")", ",", "'_manipulate_mask'", ")", ":", "# Defer to any agent-specific method for manipulating the mask", "return", "super", "(", ")", ".", "_manipulate_mask", "(", "mask", "=", "mask", ",", "student_scores", "=", "student_scores", ",", "batch", "=", "batch", ")", "else", ":", "return", "mask" ]
[ 368, 4 ]
[ 382, 23 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._extract_embedding_outputs
( self, hooks: Dict[str, Dict[str, OutputRecorder]] )
Extract out the encoder and decoder embedding outputs.
Extract out the encoder and decoder embedding outputs.
def _extract_embedding_outputs( self, hooks: Dict[str, Dict[str, OutputRecorder]] ) -> Dict[str, torch.Tensor]: """ Extract out the encoder and decoder embedding outputs. """ assert len(hooks['embeddings'].outputs) == 2 return { 'encoder': hooks['embeddings'].outputs[0], 'decoder': hooks['embeddings'].outputs[1], }
[ "def", "_extract_embedding_outputs", "(", "self", ",", "hooks", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "OutputRecorder", "]", "]", ")", "->", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ":", "assert", "len", "(", "hooks", "[", "'embeddings'", "]", ".", "outputs", ")", "==", "2", "return", "{", "'encoder'", ":", "hooks", "[", "'embeddings'", "]", ".", "outputs", "[", "0", "]", ",", "'decoder'", ":", "hooks", "[", "'embeddings'", "]", ".", "outputs", "[", "1", "]", ",", "}" ]
[ 384, 4 ]
[ 394, 9 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._extract_hidden_states
( self, hooks: Dict[str, Dict[str, OutputRecorder]], num_enc_layers: int, num_dec_layers: int, )
Extract out encoder/decoder hidden states per layer.
Extract out encoder/decoder hidden states per layer.
def _extract_hidden_states( self, hooks: Dict[str, Dict[str, OutputRecorder]], num_enc_layers: int, num_dec_layers: int, ) -> Dict[str, List[torch.Tensor]]: """ Extract out encoder/decoder hidden states per layer. """ assert len(hooks['encoder']['layers'].outputs) == num_enc_layers assert len(hooks['decoder']['layers'].outputs) == num_dec_layers return { 'encoder': hooks['encoder']['layers'].outputs, 'decoder': [out_[0] for out_ in hooks['decoder']['layers'].outputs], }
[ "def", "_extract_hidden_states", "(", "self", ",", "hooks", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "OutputRecorder", "]", "]", ",", "num_enc_layers", ":", "int", ",", "num_dec_layers", ":", "int", ",", ")", "->", "Dict", "[", "str", ",", "List", "[", "torch", ".", "Tensor", "]", "]", ":", "assert", "len", "(", "hooks", "[", "'encoder'", "]", "[", "'layers'", "]", ".", "outputs", ")", "==", "num_enc_layers", "assert", "len", "(", "hooks", "[", "'decoder'", "]", "[", "'layers'", "]", ".", "outputs", ")", "==", "num_dec_layers", "return", "{", "'encoder'", ":", "hooks", "[", "'encoder'", "]", "[", "'layers'", "]", ".", "outputs", ",", "'decoder'", ":", "[", "out_", "[", "0", "]", "for", "out_", "in", "hooks", "[", "'decoder'", "]", "[", "'layers'", "]", ".", "outputs", "]", ",", "}" ]
[ 396, 4 ]
[ 410, 9 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._extract_attention_matrices
( self, hooks: Dict[str, Dict[str, OutputRecorder]], num_enc_layers: int, num_dec_layers: int, )
Extract out encoder/decoder attention matrices per layer and attention type.
Extract out encoder/decoder attention matrices per layer and attention type.
def _extract_attention_matrices( self, hooks: Dict[str, Dict[str, OutputRecorder]], num_enc_layers: int, num_dec_layers: int, ) -> Dict[str, List[Dict[str, torch.Tensor]]]: """ Extract out encoder/decoder attention matrices per layer and attention type. """ assert len(hooks['encoder']['attentions'].outputs) == num_enc_layers assert len(hooks['decoder']['attentions'].outputs) == 2 * num_dec_layers output_idx = 2 # The position of the attention matrix among the outputs return { 'encoder': [ { 'self_attn': hooks['encoder']['attentions'].outputs[layer_idx][ output_idx ] } for layer_idx in range(num_enc_layers) ], 'decoder': [ { 'self_attn': hooks['decoder']['attentions'].outputs[2 * layer_idx][ output_idx ], 'encoder_attn': hooks['decoder']['attentions'].outputs[ 2 * layer_idx + 1 ][output_idx], } for layer_idx in range(num_dec_layers) ], }
[ "def", "_extract_attention_matrices", "(", "self", ",", "hooks", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "OutputRecorder", "]", "]", ",", "num_enc_layers", ":", "int", ",", "num_dec_layers", ":", "int", ",", ")", "->", "Dict", "[", "str", ",", "List", "[", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", "]", "]", ":", "assert", "len", "(", "hooks", "[", "'encoder'", "]", "[", "'attentions'", "]", ".", "outputs", ")", "==", "num_enc_layers", "assert", "len", "(", "hooks", "[", "'decoder'", "]", "[", "'attentions'", "]", ".", "outputs", ")", "==", "2", "*", "num_dec_layers", "output_idx", "=", "2", "# The position of the attention matrix among the outputs", "return", "{", "'encoder'", ":", "[", "{", "'self_attn'", ":", "hooks", "[", "'encoder'", "]", "[", "'attentions'", "]", ".", "outputs", "[", "layer_idx", "]", "[", "output_idx", "]", "}", "for", "layer_idx", "in", "range", "(", "num_enc_layers", ")", "]", ",", "'decoder'", ":", "[", "{", "'self_attn'", ":", "hooks", "[", "'decoder'", "]", "[", "'attentions'", "]", ".", "outputs", "[", "2", "*", "layer_idx", "]", "[", "output_idx", "]", ",", "'encoder_attn'", ":", "hooks", "[", "'decoder'", "]", "[", "'attentions'", "]", ".", "outputs", "[", "2", "*", "layer_idx", "+", "1", "]", "[", "output_idx", "]", ",", "}", "for", "layer_idx", "in", "range", "(", "num_dec_layers", ")", "]", ",", "}" ]
[ 412, 4 ]
[ 444, 9 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._clear_hook_outputs
(self, hooks: Union[Dict[str, Any], OutputRecorder])
Recursively clear outputs from all hooks.
Recursively clear outputs from all hooks.
def _clear_hook_outputs(self, hooks: Union[Dict[str, Any], OutputRecorder]): """ Recursively clear outputs from all hooks. """ if isinstance(hooks, dict): for subhooks in hooks.values(): self._clear_hook_outputs(subhooks) else: # `hooks` is an OutputRecorder hooks.clear()
[ "def", "_clear_hook_outputs", "(", "self", ",", "hooks", ":", "Union", "[", "Dict", "[", "str", ",", "Any", "]", ",", "OutputRecorder", "]", ")", ":", "if", "isinstance", "(", "hooks", ",", "dict", ")", ":", "for", "subhooks", "in", "hooks", ".", "values", "(", ")", ":", "self", ".", "_clear_hook_outputs", "(", "subhooks", ")", "else", ":", "# `hooks` is an OutputRecorder", "hooks", ".", "clear", "(", ")" ]
[ 446, 4 ]
[ 455, 25 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_encoder_loss
(self, fwd_pass: ForwardPassOutputs)
Return the loss on the encoder's output layer.
Return the loss on the encoder's output layer.
def _get_encoder_loss(self, fwd_pass: ForwardPassOutputs) -> torch.Tensor: """ Return the loss on the encoder's output layer. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods encoder_loss = F.mse_loss( input=fwd_pass.student_enc_output, target=fwd_pass.teacher_enc_output, reduction='none', ) encoder_loss = encoder_loss.mean(dim=-1) * fwd_pass.context_mask # Avg over embedding dim self.record_local_metric( 'enc_loss', AverageMetric.many( encoder_loss.sum(dim=-1), fwd_pass.context_tokens_per_example ), ) # Sum over token dim encoder_loss = encoder_loss.div(fwd_pass.num_context_tokens).sum() # Divide before summing over examples so that values don't get too large return encoder_loss
[ "def", "_get_encoder_loss", "(", "self", ",", "fwd_pass", ":", "ForwardPassOutputs", ")", "->", "torch", ".", "Tensor", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "encoder_loss", "=", "F", ".", "mse_loss", "(", "input", "=", "fwd_pass", ".", "student_enc_output", ",", "target", "=", "fwd_pass", ".", "teacher_enc_output", ",", "reduction", "=", "'none'", ",", ")", "encoder_loss", "=", "encoder_loss", ".", "mean", "(", "dim", "=", "-", "1", ")", "*", "fwd_pass", ".", "context_mask", "# Avg over embedding dim", "self", ".", "record_local_metric", "(", "'enc_loss'", ",", "AverageMetric", ".", "many", "(", "encoder_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", ",", "fwd_pass", ".", "context_tokens_per_example", ")", ",", ")", "# Sum over token dim", "encoder_loss", "=", "encoder_loss", ".", "div", "(", "fwd_pass", ".", "num_context_tokens", ")", ".", "sum", "(", ")", "# Divide before summing over examples so that values don't get too large", "return", "encoder_loss" ]
[ 457, 4 ]
[ 478, 27 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_embedding_losses
( self, fwd_pass: ForwardPassOutputs )
Return the encoder and decoder embedding losses.
Return the encoder and decoder embedding losses.
def _get_embedding_losses( self, fwd_pass: ForwardPassOutputs ) -> Tuple[torch.Tensor, torch.Tensor]: """ Return the encoder and decoder embedding losses. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods enc_emb_loss, enc_emb_loss_per_example = self._get_component_embedding_loss( student_emb_output=fwd_pass.student_embedding_outputs['encoder'], teacher_emb_output=fwd_pass.teacher_embedding_outputs['encoder'], mask=fwd_pass.context_mask, num_tokens=fwd_pass.num_context_tokens, ) self.record_local_metric( 'enc_emb_loss', AverageMetric.many( enc_emb_loss_per_example, fwd_pass.context_tokens_per_example ), ) dec_emb_loss, dec_emb_loss_per_example = self._get_component_embedding_loss( student_emb_output=fwd_pass.student_embedding_outputs['decoder'], teacher_emb_output=fwd_pass.teacher_embedding_outputs['decoder'], mask=fwd_pass.mask, num_tokens=fwd_pass.num_tokens, ) self.record_local_metric( 'dec_emb_loss', AverageMetric.many(dec_emb_loss_per_example, fwd_pass.tokens_per_example), ) return enc_emb_loss, dec_emb_loss
[ "def", "_get_embedding_losses", "(", "self", ",", "fwd_pass", ":", "ForwardPassOutputs", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "enc_emb_loss", ",", "enc_emb_loss_per_example", "=", "self", ".", "_get_component_embedding_loss", "(", "student_emb_output", "=", "fwd_pass", ".", "student_embedding_outputs", "[", "'encoder'", "]", ",", "teacher_emb_output", "=", "fwd_pass", ".", "teacher_embedding_outputs", "[", "'encoder'", "]", ",", "mask", "=", "fwd_pass", ".", "context_mask", ",", "num_tokens", "=", "fwd_pass", ".", "num_context_tokens", ",", ")", "self", ".", "record_local_metric", "(", "'enc_emb_loss'", ",", "AverageMetric", ".", "many", "(", "enc_emb_loss_per_example", ",", "fwd_pass", ".", "context_tokens_per_example", ")", ",", ")", "dec_emb_loss", ",", "dec_emb_loss_per_example", "=", "self", ".", "_get_component_embedding_loss", "(", "student_emb_output", "=", "fwd_pass", ".", "student_embedding_outputs", "[", "'decoder'", "]", ",", "teacher_emb_output", "=", "fwd_pass", ".", "teacher_embedding_outputs", "[", "'decoder'", "]", ",", "mask", "=", "fwd_pass", ".", "mask", ",", "num_tokens", "=", "fwd_pass", ".", "num_tokens", ",", ")", "self", ".", "record_local_metric", "(", "'dec_emb_loss'", ",", "AverageMetric", ".", "many", "(", "dec_emb_loss_per_example", ",", "fwd_pass", ".", "tokens_per_example", ")", ",", ")", "return", "enc_emb_loss", ",", "dec_emb_loss" ]
[ 480, 4 ]
[ 510, 41 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_component_embedding_loss
( self, student_emb_output: torch.Tensor, teacher_emb_output: torch.Tensor, mask: torch.BoolTensor, num_tokens: torch.Tensor, )
Compute the embedding loss for either the encoder or the decoder.
Compute the embedding loss for either the encoder or the decoder.
def _get_component_embedding_loss( self, student_emb_output: torch.Tensor, teacher_emb_output: torch.Tensor, mask: torch.BoolTensor, num_tokens: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute the embedding loss for either the encoder or the decoder. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods raw_loss = F.mse_loss( input=student_emb_output, target=teacher_emb_output, reduction='none' ) clamped_loss = torch.clamp(raw_loss, min=0, max=NEAR_INF_FP16) # Prevent infs from appearing in the loss term. Especially important with fp16 masked_loss = clamped_loss.sum(dim=-1) * mask # Sum over embedding dim embedding_loss_per_example = masked_loss.sum(dim=-1) # Sum over token dim embedding_loss = masked_loss.div(num_tokens).sum() # Divide before summing over examples so that values don't get too large return embedding_loss, embedding_loss_per_example
[ "def", "_get_component_embedding_loss", "(", "self", ",", "student_emb_output", ":", "torch", ".", "Tensor", ",", "teacher_emb_output", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "BoolTensor", ",", "num_tokens", ":", "torch", ".", "Tensor", ",", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "raw_loss", "=", "F", ".", "mse_loss", "(", "input", "=", "student_emb_output", ",", "target", "=", "teacher_emb_output", ",", "reduction", "=", "'none'", ")", "clamped_loss", "=", "torch", ".", "clamp", "(", "raw_loss", ",", "min", "=", "0", ",", "max", "=", "NEAR_INF_FP16", ")", "# Prevent infs from appearing in the loss term. Especially important with fp16", "masked_loss", "=", "clamped_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", "*", "mask", "# Sum over embedding dim", "embedding_loss_per_example", "=", "masked_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", "# Sum over token dim", "embedding_loss", "=", "masked_loss", ".", "div", "(", "num_tokens", ")", ".", "sum", "(", ")", "# Divide before summing over examples so that values don't get too large", "return", "embedding_loss", ",", "embedding_loss_per_example" ]
[ 512, 4 ]
[ 534, 57 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_hidden_losses
( self, fwd_pass: ForwardPassOutputs )
Return the encoder and decoder hidden losses.
Return the encoder and decoder hidden losses.
def _get_hidden_losses( self, fwd_pass: ForwardPassOutputs ) -> Tuple[torch.Tensor, torch.Tensor]: """ Return the encoder and decoder hidden losses. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods enc_hidden_loss, enc_hidden_loss_per_example = self._get_component_hidden_loss( student_hidden_states=fwd_pass.student_hidden_states['encoder'], teacher_hidden_states=fwd_pass.teacher_hidden_states['encoder'], mask=fwd_pass.context_mask, num_tokens=fwd_pass.num_context_tokens, mapped_layers=self.mapped_enc_layers, ) self.record_local_metric( 'enc_hid_loss', AverageMetric.many( enc_hidden_loss_per_example, fwd_pass.context_tokens_per_example ), ) dec_hidden_loss, dec_hidden_loss_per_example = self._get_component_hidden_loss( student_hidden_states=fwd_pass.student_hidden_states['decoder'], teacher_hidden_states=fwd_pass.teacher_hidden_states['decoder'], mask=fwd_pass.mask, num_tokens=fwd_pass.num_tokens, mapped_layers=self.mapped_dec_layers, ) self.record_local_metric( 'dec_hid_loss', AverageMetric.many( dec_hidden_loss_per_example, fwd_pass.tokens_per_example ), ) return enc_hidden_loss, dec_hidden_loss
[ "def", "_get_hidden_losses", "(", "self", ",", "fwd_pass", ":", "ForwardPassOutputs", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "enc_hidden_loss", ",", "enc_hidden_loss_per_example", "=", "self", ".", "_get_component_hidden_loss", "(", "student_hidden_states", "=", "fwd_pass", ".", "student_hidden_states", "[", "'encoder'", "]", ",", "teacher_hidden_states", "=", "fwd_pass", ".", "teacher_hidden_states", "[", "'encoder'", "]", ",", "mask", "=", "fwd_pass", ".", "context_mask", ",", "num_tokens", "=", "fwd_pass", ".", "num_context_tokens", ",", "mapped_layers", "=", "self", ".", "mapped_enc_layers", ",", ")", "self", ".", "record_local_metric", "(", "'enc_hid_loss'", ",", "AverageMetric", ".", "many", "(", "enc_hidden_loss_per_example", ",", "fwd_pass", ".", "context_tokens_per_example", ")", ",", ")", "dec_hidden_loss", ",", "dec_hidden_loss_per_example", "=", "self", ".", "_get_component_hidden_loss", "(", "student_hidden_states", "=", "fwd_pass", ".", "student_hidden_states", "[", "'decoder'", "]", ",", "teacher_hidden_states", "=", "fwd_pass", ".", "teacher_hidden_states", "[", "'decoder'", "]", ",", "mask", "=", "fwd_pass", ".", "mask", ",", "num_tokens", "=", "fwd_pass", ".", "num_tokens", ",", "mapped_layers", "=", "self", ".", "mapped_dec_layers", ",", ")", "self", ".", "record_local_metric", "(", "'dec_hid_loss'", ",", "AverageMetric", ".", "many", "(", "dec_hidden_loss_per_example", ",", "fwd_pass", ".", "tokens_per_example", ")", ",", ")", "return", "enc_hidden_loss", ",", "dec_hidden_loss" ]
[ 536, 4 ]
[ 570, 47 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_component_hidden_loss
( self, student_hidden_states: List[torch.Tensor], teacher_hidden_states: List[torch.Tensor], mask: torch.BoolTensor, num_tokens: torch.Tensor, mapped_layers: List[int], )
Compute the loss across all hidden layers for either the encoder or the decoder. (The loss is averaged across all hidden layers and over the embedding dimension so that it doesn't get too high for fp16 tensors.)
Compute the loss across all hidden layers for either the encoder or the decoder.
def _get_component_hidden_loss( self, student_hidden_states: List[torch.Tensor], teacher_hidden_states: List[torch.Tensor], mask: torch.BoolTensor, num_tokens: torch.Tensor, mapped_layers: List[int], ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute the loss across all hidden layers for either the encoder or the decoder. (The loss is averaged across all hidden layers and over the embedding dimension so that it doesn't get too high for fp16 tensors.) """ per_layer_losses = [] per_layer_per_example_losses = [] for student_layer_idx, teacher_layer_idx in enumerate(mapped_layers): raw_layer_loss = F.mse_loss( input=student_hidden_states[student_layer_idx], target=teacher_hidden_states[teacher_layer_idx], reduction='none', ) clamped_layer_loss = torch.clamp(raw_layer_loss, min=0, max=NEAR_INF_FP16) # Prevent infs from appearing in the loss term. Especially important with # fp16 masked_layer_loss = clamped_layer_loss.mean(dim=-1) * mask # Avg over embedding dim layer_loss_per_example = masked_layer_loss.sum(dim=-1) # Sum over token dim layer_loss = masked_layer_loss.div(num_tokens).sum() # Divide before summing over examples so that values don't get too large per_layer_losses.append(layer_loss) per_layer_per_example_losses.append(layer_loss_per_example) hidden_loss = torch.stack(per_layer_losses).mean() hidden_loss_per_example = torch.stack(per_layer_per_example_losses, dim=1).mean( dim=1 ) return hidden_loss, hidden_loss_per_example
[ "def", "_get_component_hidden_loss", "(", "self", ",", "student_hidden_states", ":", "List", "[", "torch", ".", "Tensor", "]", ",", "teacher_hidden_states", ":", "List", "[", "torch", ".", "Tensor", "]", ",", "mask", ":", "torch", ".", "BoolTensor", ",", "num_tokens", ":", "torch", ".", "Tensor", ",", "mapped_layers", ":", "List", "[", "int", "]", ",", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "per_layer_losses", "=", "[", "]", "per_layer_per_example_losses", "=", "[", "]", "for", "student_layer_idx", ",", "teacher_layer_idx", "in", "enumerate", "(", "mapped_layers", ")", ":", "raw_layer_loss", "=", "F", ".", "mse_loss", "(", "input", "=", "student_hidden_states", "[", "student_layer_idx", "]", ",", "target", "=", "teacher_hidden_states", "[", "teacher_layer_idx", "]", ",", "reduction", "=", "'none'", ",", ")", "clamped_layer_loss", "=", "torch", ".", "clamp", "(", "raw_layer_loss", ",", "min", "=", "0", ",", "max", "=", "NEAR_INF_FP16", ")", "# Prevent infs from appearing in the loss term. Especially important with", "# fp16", "masked_layer_loss", "=", "clamped_layer_loss", ".", "mean", "(", "dim", "=", "-", "1", ")", "*", "mask", "# Avg over embedding dim", "layer_loss_per_example", "=", "masked_layer_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", "# Sum over token dim", "layer_loss", "=", "masked_layer_loss", ".", "div", "(", "num_tokens", ")", ".", "sum", "(", ")", "# Divide before summing over examples so that values don't get too large", "per_layer_losses", ".", "append", "(", "layer_loss", ")", "per_layer_per_example_losses", ".", "append", "(", "layer_loss_per_example", ")", "hidden_loss", "=", "torch", ".", "stack", "(", "per_layer_losses", ")", ".", "mean", "(", ")", "hidden_loss_per_example", "=", "torch", ".", "stack", "(", "per_layer_per_example_losses", ",", "dim", "=", "1", ")", ".", "mean", "(", "dim", "=", "1", ")", "return", "hidden_loss", ",", "hidden_loss_per_example" ]
[ 572, 4 ]
[ 608, 51 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_attention_losses
( self, fwd_pass: ForwardPassOutputs )
Return attention losses. Compute and return losses on encoder and decoder self-attention and decoder enc/dec attention.
Return attention losses.
def _get_attention_losses( self, fwd_pass: ForwardPassOutputs ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Return attention losses. Compute and return losses on encoder and decoder self-attention and decoder enc/dec attention. """ enc_self_attn_loss = self._get_and_record_component_attention_loss( student_attention_matrices=fwd_pass.student_attention_matrices['encoder'], teacher_attention_matrices=fwd_pass.teacher_attention_matrices['encoder'], mask=fwd_pass.context_mask, tokens_per_example=fwd_pass.context_tokens_per_example, num_tokens=fwd_pass.num_context_tokens, mapped_layers=self.mapped_enc_layers, attn_type='self_attn', metric_name='enc_self_attn_loss', ) dec_self_attn_loss = self._get_and_record_component_attention_loss( student_attention_matrices=fwd_pass.student_attention_matrices['decoder'], teacher_attention_matrices=fwd_pass.teacher_attention_matrices['decoder'], mask=fwd_pass.mask, tokens_per_example=fwd_pass.tokens_per_example, num_tokens=fwd_pass.num_tokens, mapped_layers=self.mapped_dec_layers, attn_type='self_attn', metric_name='dec_self_attn_loss', ) enc_dec_attn_loss = self._get_and_record_component_attention_loss( student_attention_matrices=fwd_pass.student_attention_matrices['decoder'], teacher_attention_matrices=fwd_pass.teacher_attention_matrices['decoder'], mask=fwd_pass.mask, tokens_per_example=fwd_pass.tokens_per_example, num_tokens=fwd_pass.num_tokens, mapped_layers=self.mapped_dec_layers, attn_type='encoder_attn', metric_name='enc_dec_attn_loss', ) return enc_self_attn_loss, dec_self_attn_loss, enc_dec_attn_loss
[ "def", "_get_attention_losses", "(", "self", ",", "fwd_pass", ":", "ForwardPassOutputs", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "enc_self_attn_loss", "=", "self", ".", "_get_and_record_component_attention_loss", "(", "student_attention_matrices", "=", "fwd_pass", ".", "student_attention_matrices", "[", "'encoder'", "]", ",", "teacher_attention_matrices", "=", "fwd_pass", ".", "teacher_attention_matrices", "[", "'encoder'", "]", ",", "mask", "=", "fwd_pass", ".", "context_mask", ",", "tokens_per_example", "=", "fwd_pass", ".", "context_tokens_per_example", ",", "num_tokens", "=", "fwd_pass", ".", "num_context_tokens", ",", "mapped_layers", "=", "self", ".", "mapped_enc_layers", ",", "attn_type", "=", "'self_attn'", ",", "metric_name", "=", "'enc_self_attn_loss'", ",", ")", "dec_self_attn_loss", "=", "self", ".", "_get_and_record_component_attention_loss", "(", "student_attention_matrices", "=", "fwd_pass", ".", "student_attention_matrices", "[", "'decoder'", "]", ",", "teacher_attention_matrices", "=", "fwd_pass", ".", "teacher_attention_matrices", "[", "'decoder'", "]", ",", "mask", "=", "fwd_pass", ".", "mask", ",", "tokens_per_example", "=", "fwd_pass", ".", "tokens_per_example", ",", "num_tokens", "=", "fwd_pass", ".", "num_tokens", ",", "mapped_layers", "=", "self", ".", "mapped_dec_layers", ",", "attn_type", "=", "'self_attn'", ",", "metric_name", "=", "'dec_self_attn_loss'", ",", ")", "enc_dec_attn_loss", "=", "self", ".", "_get_and_record_component_attention_loss", "(", "student_attention_matrices", "=", "fwd_pass", ".", "student_attention_matrices", "[", "'decoder'", "]", ",", "teacher_attention_matrices", "=", "fwd_pass", ".", "teacher_attention_matrices", "[", "'decoder'", "]", ",", "mask", "=", "fwd_pass", ".", "mask", ",", "tokens_per_example", "=", "fwd_pass", ".", "tokens_per_example", ",", "num_tokens", "=", "fwd_pass", ".", "num_tokens", ",", "mapped_layers", "=", 
"self", ".", "mapped_dec_layers", ",", "attn_type", "=", "'encoder_attn'", ",", "metric_name", "=", "'enc_dec_attn_loss'", ",", ")", "return", "enc_self_attn_loss", ",", "dec_self_attn_loss", ",", "enc_dec_attn_loss" ]
[ 610, 4 ]
[ 649, 72 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_and_record_component_attention_loss
( self, teacher_attention_matrices: List[Dict[str, torch.Tensor]], student_attention_matrices: List[Dict[str, torch.Tensor]], mask: torch.BoolTensor, tokens_per_example: torch.Tensor, num_tokens: torch.Tensor, mapped_layers: List[int], attn_type: str, metric_name: str, )
Calculate the given attention loss and register it as the given metric name.
Calculate the given attention loss and register it as the given metric name.
def _get_and_record_component_attention_loss( self, teacher_attention_matrices: List[Dict[str, torch.Tensor]], student_attention_matrices: List[Dict[str, torch.Tensor]], mask: torch.BoolTensor, tokens_per_example: torch.Tensor, num_tokens: torch.Tensor, mapped_layers: List[int], attn_type: str, metric_name: str, ) -> torch.Tensor: """ Calculate the given attention loss and register it as the given metric name. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods # Select the right attention matrices selected_student_attn_matrices = [ layer_matrices[attn_type] for layer_matrices in student_attention_matrices ] selected_teacher_attn_matrices = [ layer_matrices[attn_type] for layer_matrices in teacher_attention_matrices ] batch_size = mask.size(0) per_layer_losses = [] per_layer_per_example_losses = [] for student_layer_idx, teacher_layer_idx in enumerate(mapped_layers): raw_layer_loss = F.mse_loss( input=selected_student_attn_matrices[student_layer_idx], target=selected_teacher_attn_matrices[teacher_layer_idx], reduction='none', ) clamped_layer_loss = torch.clamp(raw_layer_loss, min=0, max=NEAR_INF_FP16) # Prevent infs from appearing in the loss term. 
Especially important with # fp16 reshaped_layer_loss = clamped_layer_loss.view( batch_size, -1, clamped_layer_loss.size(-2), clamped_layer_loss.size(-1) ) # [batch size, n heads, query length, key length] mean_layer_loss = reshaped_layer_loss.mean(dim=(1, 3)) # Take the mean over the attention heads and the key length assert mean_layer_loss.shape == mask.shape masked_layer_loss = mean_layer_loss * mask layer_loss_per_example = masked_layer_loss.sum(dim=-1) # Sum over token dim layer_loss = masked_layer_loss.div(num_tokens).sum() # Divide before summing over examples so that values don't get too large per_layer_losses.append(layer_loss) per_layer_per_example_losses.append(layer_loss_per_example) attn_loss = torch.stack(per_layer_losses).mean() attn_loss_per_example = torch.stack(per_layer_per_example_losses, dim=1).mean( dim=1 ) # Record metric self.record_local_metric( metric_name, AverageMetric.many(attn_loss_per_example, tokens_per_example) ) return attn_loss
[ "def", "_get_and_record_component_attention_loss", "(", "self", ",", "teacher_attention_matrices", ":", "List", "[", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", "]", ",", "student_attention_matrices", ":", "List", "[", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", "]", ",", "mask", ":", "torch", ".", "BoolTensor", ",", "tokens_per_example", ":", "torch", ".", "Tensor", ",", "num_tokens", ":", "torch", ".", "Tensor", ",", "mapped_layers", ":", "List", "[", "int", "]", ",", "attn_type", ":", "str", ",", "metric_name", ":", "str", ",", ")", "->", "torch", ".", "Tensor", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "# Select the right attention matrices", "selected_student_attn_matrices", "=", "[", "layer_matrices", "[", "attn_type", "]", "for", "layer_matrices", "in", "student_attention_matrices", "]", "selected_teacher_attn_matrices", "=", "[", "layer_matrices", "[", "attn_type", "]", "for", "layer_matrices", "in", "teacher_attention_matrices", "]", "batch_size", "=", "mask", ".", "size", "(", "0", ")", "per_layer_losses", "=", "[", "]", "per_layer_per_example_losses", "=", "[", "]", "for", "student_layer_idx", ",", "teacher_layer_idx", "in", "enumerate", "(", "mapped_layers", ")", ":", "raw_layer_loss", "=", "F", ".", "mse_loss", "(", "input", "=", "selected_student_attn_matrices", "[", "student_layer_idx", "]", ",", "target", "=", "selected_teacher_attn_matrices", "[", "teacher_layer_idx", "]", ",", "reduction", "=", "'none'", ",", ")", "clamped_layer_loss", "=", "torch", ".", "clamp", "(", "raw_layer_loss", ",", "min", "=", "0", ",", "max", "=", "NEAR_INF_FP16", ")", "# Prevent infs from appearing in the loss term. 
Especially important with", "# fp16", "reshaped_layer_loss", "=", "clamped_layer_loss", ".", "view", "(", "batch_size", ",", "-", "1", ",", "clamped_layer_loss", ".", "size", "(", "-", "2", ")", ",", "clamped_layer_loss", ".", "size", "(", "-", "1", ")", ")", "# [batch size, n heads, query length, key length]", "mean_layer_loss", "=", "reshaped_layer_loss", ".", "mean", "(", "dim", "=", "(", "1", ",", "3", ")", ")", "# Take the mean over the attention heads and the key length", "assert", "mean_layer_loss", ".", "shape", "==", "mask", ".", "shape", "masked_layer_loss", "=", "mean_layer_loss", "*", "mask", "layer_loss_per_example", "=", "masked_layer_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", "# Sum over token dim", "layer_loss", "=", "masked_layer_loss", ".", "div", "(", "num_tokens", ")", ".", "sum", "(", ")", "# Divide before summing over examples so that values don't get too large", "per_layer_losses", ".", "append", "(", "layer_loss", ")", "per_layer_per_example_losses", ".", "append", "(", "layer_loss_per_example", ")", "attn_loss", "=", "torch", ".", "stack", "(", "per_layer_losses", ")", ".", "mean", "(", ")", "attn_loss_per_example", "=", "torch", ".", "stack", "(", "per_layer_per_example_losses", ",", "dim", "=", "1", ")", ".", "mean", "(", "dim", "=", "1", ")", "# Record metric", "self", ".", "record_local_metric", "(", "metric_name", ",", "AverageMetric", ".", "many", "(", "attn_loss_per_example", ",", "tokens_per_example", ")", ")", "return", "attn_loss" ]
[ 651, 4 ]
[ 712, 24 ]
python
en
['en', 'error', 'th']
False
AbstractDistillTransformerAgentMixin._get_prediction_loss
(self, fwd_pass: ForwardPassOutputs)
Calculate and return the KL loss on the teacher's prediction layer. Also record prediction-loss metrics.
Calculate and return the KL loss on the teacher's prediction layer.
def _get_prediction_loss(self, fwd_pass: ForwardPassOutputs) -> torch.Tensor: """ Calculate and return the KL loss on the teacher's prediction layer. Also record prediction-loss metrics. """ assert isinstance(self, TorchGeneratorAgent) # Code relies on methods pred_loss = F.kl_div( F.log_softmax(fwd_pass.student_scores, dim=-1, dtype=torch.float), F.softmax(fwd_pass.teacher_scores, dim=-1, dtype=torch.float), reduction='none', ).type_as(fwd_pass.student_scores) pred_loss = pred_loss.sum(dim=-1) * fwd_pass.mask self.record_local_metric( 'pred_ppl', PPLMetric.many(pred_loss.sum(dim=-1), fwd_pass.tokens_per_example), ) self.record_local_metric( 'pred_loss', AverageMetric.many(pred_loss.sum(dim=-1), fwd_pass.tokens_per_example), ) pred_loss = pred_loss.sum() / fwd_pass.num_tokens return pred_loss
[ "def", "_get_prediction_loss", "(", "self", ",", "fwd_pass", ":", "ForwardPassOutputs", ")", "->", "torch", ".", "Tensor", ":", "assert", "isinstance", "(", "self", ",", "TorchGeneratorAgent", ")", "# Code relies on methods", "pred_loss", "=", "F", ".", "kl_div", "(", "F", ".", "log_softmax", "(", "fwd_pass", ".", "student_scores", ",", "dim", "=", "-", "1", ",", "dtype", "=", "torch", ".", "float", ")", ",", "F", ".", "softmax", "(", "fwd_pass", ".", "teacher_scores", ",", "dim", "=", "-", "1", ",", "dtype", "=", "torch", ".", "float", ")", ",", "reduction", "=", "'none'", ",", ")", ".", "type_as", "(", "fwd_pass", ".", "student_scores", ")", "pred_loss", "=", "pred_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", "*", "fwd_pass", ".", "mask", "self", ".", "record_local_metric", "(", "'pred_ppl'", ",", "PPLMetric", ".", "many", "(", "pred_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", ",", "fwd_pass", ".", "tokens_per_example", ")", ",", ")", "self", ".", "record_local_metric", "(", "'pred_loss'", ",", "AverageMetric", ".", "many", "(", "pred_loss", ".", "sum", "(", "dim", "=", "-", "1", ")", ",", "fwd_pass", ".", "tokens_per_example", ")", ",", ")", "pred_loss", "=", "pred_loss", ".", "sum", "(", ")", "/", "fwd_pass", ".", "num_tokens", "return", "pred_loss" ]
[ 714, 4 ]
[ 737, 24 ]
python
en
['en', 'error', 'th']
False
DistillNarrowTransformerAgentMixin._get_projection_layer
(self, student_model)
Return a projection layer from the student hidden dim to the teacher hidden dim.
Return a projection layer from the student hidden dim to the teacher hidden dim.
def _get_projection_layer(self, student_model): """ Return a projection layer from the student hidden dim to the teacher hidden dim. """ teacher_model = self._get_teacher_model() student_hidden_dim = student_model.encoder.dim teacher_hidden_dim = teacher_model.encoder.dim assert ( student_hidden_dim == student_model.decoder.dim and teacher_hidden_dim == teacher_model.decoder.dim ) layer = nn.Linear(student_hidden_dim, teacher_hidden_dim) # From TinyBERT's BertPreTrainedModel.init_bert_weights() method at # https://github.com/huawei-noah/Pretrained-Language-Model/blob/master/TinyBERT/transformer/modeling.py#L628 layer.weight.data.normal_(mean=0.0, std=0.02) layer.bias.data.zero_() return layer
[ "def", "_get_projection_layer", "(", "self", ",", "student_model", ")", ":", "teacher_model", "=", "self", ".", "_get_teacher_model", "(", ")", "student_hidden_dim", "=", "student_model", ".", "encoder", ".", "dim", "teacher_hidden_dim", "=", "teacher_model", ".", "encoder", ".", "dim", "assert", "(", "student_hidden_dim", "==", "student_model", ".", "decoder", ".", "dim", "and", "teacher_hidden_dim", "==", "teacher_model", ".", "decoder", ".", "dim", ")", "layer", "=", "nn", ".", "Linear", "(", "student_hidden_dim", ",", "teacher_hidden_dim", ")", "# From TinyBERT's BertPreTrainedModel.init_bert_weights() method at", "# https://github.com/huawei-noah/Pretrained-Language-Model/blob/master/TinyBERT/transformer/modeling.py#L628", "layer", ".", "weight", ".", "data", ".", "normal_", "(", "mean", "=", "0.0", ",", "std", "=", "0.02", ")", "layer", ".", "bias", ".", "data", ".", "zero_", "(", ")", "return", "layer" ]
[ 891, 4 ]
[ 912, 20 ]
python
en
['en', 'error', 'th']
False
DistillTransformerAgent.add_cmdline_args
( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None )
Add command-line arguments specifically for this agent.
Add command-line arguments specifically for this agent.
def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ DistillTransformerAgentMixin.add_cmdline_args(parser, partial_opt=partial_opt) TransformerGeneratorAgent.add_cmdline_args(parser, partial_opt=partial_opt) return parser
[ "def", "add_cmdline_args", "(", "cls", ",", "parser", ":", "ParlaiParser", ",", "partial_opt", ":", "Optional", "[", "Opt", "]", "=", "None", ")", "->", "ParlaiParser", ":", "DistillTransformerAgentMixin", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "TransformerGeneratorAgent", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "return", "parser" ]
[ 983, 4 ]
[ 991, 21 ]
python
en
['en', 'error', 'th']
False
DistillNarrowTransformerAgent.add_cmdline_args
( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None )
Add command-line arguments specifically for this agent.
Add command-line arguments specifically for this agent.
def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ DistillNarrowTransformerAgentMixin.add_cmdline_args( parser, partial_opt=partial_opt ) TransformerGeneratorAgent.add_cmdline_args(parser, partial_opt=partial_opt) return parser
[ "def", "add_cmdline_args", "(", "cls", ",", "parser", ":", "ParlaiParser", ",", "partial_opt", ":", "Optional", "[", "Opt", "]", "=", "None", ")", "->", "ParlaiParser", ":", "DistillNarrowTransformerAgentMixin", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "TransformerGeneratorAgent", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "return", "parser" ]
[ 998, 4 ]
[ 1008, 21 ]
python
en
['en', 'error', 'th']
False
BartLikeAgent._manipulate_mask
( self, mask: torch.BoolTensor, student_scores: torch.Tensor, batch: Batch )
Add one extra (masked-out) token to the mask, for compatibility with BART.
Add one extra (masked-out) token to the mask, for compatibility with BART.
def _manipulate_mask( self, mask: torch.BoolTensor, student_scores: torch.Tensor, batch: Batch ) -> torch.BoolTensor: """ Add one extra (masked-out) token to the mask, for compatibility with BART. """ assert student_scores.size(1) == batch.label_vec.size(1) + 1 mask = torch.cat([mask.new_zeros([mask.size(0), 1]), mask], dim=1) return mask
[ "def", "_manipulate_mask", "(", "self", ",", "mask", ":", "torch", ".", "BoolTensor", ",", "student_scores", ":", "torch", ".", "Tensor", ",", "batch", ":", "Batch", ")", "->", "torch", ".", "BoolTensor", ":", "assert", "student_scores", ".", "size", "(", "1", ")", "==", "batch", ".", "label_vec", ".", "size", "(", "1", ")", "+", "1", "mask", "=", "torch", ".", "cat", "(", "[", "mask", ".", "new_zeros", "(", "[", "mask", ".", "size", "(", "0", ")", ",", "1", "]", ")", ",", "mask", "]", ",", "dim", "=", "1", ")", "return", "mask" ]
[ 1020, 4 ]
[ 1028, 19 ]
python
en
['en', 'error', 'th']
False
DistillBartAgent.add_cmdline_args
( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None )
Add command-line arguments specifically for this agent.
Add command-line arguments specifically for this agent.
def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ DistillTransformerAgentMixin.add_cmdline_args(parser, partial_opt=partial_opt) BartLikeAgent.add_cmdline_args(parser, partial_opt=partial_opt) return parser
[ "def", "add_cmdline_args", "(", "cls", ",", "parser", ":", "ParlaiParser", ",", "partial_opt", ":", "Optional", "[", "Opt", "]", "=", "None", ")", "->", "ParlaiParser", ":", "DistillTransformerAgentMixin", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "BartLikeAgent", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "return", "parser" ]
[ 1033, 4 ]
[ 1041, 21 ]
python
en
['en', 'error', 'th']
False
DistillNarrowBartAgent.add_cmdline_args
( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None )
Add command-line arguments specifically for this agent.
Add command-line arguments specifically for this agent.
def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command-line arguments specifically for this agent. """ DistillNarrowTransformerAgentMixin.add_cmdline_args( parser, partial_opt=partial_opt ) BartLikeAgent.add_cmdline_args(parser, partial_opt=partial_opt) return parser
[ "def", "add_cmdline_args", "(", "cls", ",", "parser", ":", "ParlaiParser", ",", "partial_opt", ":", "Optional", "[", "Opt", "]", "=", "None", ")", "->", "ParlaiParser", ":", "DistillNarrowTransformerAgentMixin", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "BartLikeAgent", ".", "add_cmdline_args", "(", "parser", ",", "partial_opt", "=", "partial_opt", ")", "return", "parser" ]
[ 1046, 4 ]
[ 1056, 21 ]
python
en
['en', 'error', 'th']
False
verify_or_create_SSL_key_and_cert
(keyfile, certfile)
Verify or create new key/certificate files. Args: keyfile (str): Path to ssl.key file. certfile (str): Parth to ssl.cert file. Notes: If files don't already exist, they are created.
Verify or create new key/certificate files.
def verify_or_create_SSL_key_and_cert(keyfile, certfile): """ Verify or create new key/certificate files. Args: keyfile (str): Path to ssl.key file. certfile (str): Parth to ssl.cert file. Notes: If files don't already exist, they are created. """ if not (os.path.exists(keyfile) and os.path.exists(certfile)): # key/cert does not exist. Create. try: # generate the keypair keypair = crypto.PKey() keypair.generate_key(crypto.TYPE_RSA, _PRIVATE_KEY_LENGTH) with open(_PRIVATE_KEY_FILE, 'wt') as pfile: pfile.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, keypair)) print("Created SSL private key in '{}'.".format(_PRIVATE_KEY_FILE)) with open(_PUBLIC_KEY_FILE, 'wt') as pfile: pfile.write(crypto.dump_publickey(crypto.FILETYPE_PEM, keypair)) print("Created SSL public key in '{}'.".format(_PUBLIC_KEY_FILE)) except Exception as err: print(NO_AUTOGEN.format(err=err)) return False else: try: # create certificate cert = crypto.X509() subj = cert.get_subject() for key, value in _CERTIFICATE_ISSUER.items(): setattr(subj, key, value) cert.set_issuer(subj) cert.set_serial_number(1000) cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(_CERTIFICATE_EXPIRE) cert.set_pubkey(keypair) cert.sign(keypair, 'sha1') with open(_CERTIFICATE_FILE, 'wt') as cfile: cfile.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) print("Created SSL certificate in '{}'.".format(_CERTIFICATE_FILE)) except Exception as err: print(NO_AUTOCERT.format(err=err)) return False return True
[ "def", "verify_or_create_SSL_key_and_cert", "(", "keyfile", ",", "certfile", ")", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "keyfile", ")", "and", "os", ".", "path", ".", "exists", "(", "certfile", ")", ")", ":", "# key/cert does not exist. Create.", "try", ":", "# generate the keypair", "keypair", "=", "crypto", ".", "PKey", "(", ")", "keypair", ".", "generate_key", "(", "crypto", ".", "TYPE_RSA", ",", "_PRIVATE_KEY_LENGTH", ")", "with", "open", "(", "_PRIVATE_KEY_FILE", ",", "'wt'", ")", "as", "pfile", ":", "pfile", ".", "write", "(", "crypto", ".", "dump_privatekey", "(", "crypto", ".", "FILETYPE_PEM", ",", "keypair", ")", ")", "print", "(", "\"Created SSL private key in '{}'.\"", ".", "format", "(", "_PRIVATE_KEY_FILE", ")", ")", "with", "open", "(", "_PUBLIC_KEY_FILE", ",", "'wt'", ")", "as", "pfile", ":", "pfile", ".", "write", "(", "crypto", ".", "dump_publickey", "(", "crypto", ".", "FILETYPE_PEM", ",", "keypair", ")", ")", "print", "(", "\"Created SSL public key in '{}'.\"", ".", "format", "(", "_PUBLIC_KEY_FILE", ")", ")", "except", "Exception", "as", "err", ":", "print", "(", "NO_AUTOGEN", ".", "format", "(", "err", "=", "err", ")", ")", "return", "False", "else", ":", "try", ":", "# create certificate", "cert", "=", "crypto", ".", "X509", "(", ")", "subj", "=", "cert", ".", "get_subject", "(", ")", "for", "key", ",", "value", "in", "_CERTIFICATE_ISSUER", ".", "items", "(", ")", ":", "setattr", "(", "subj", ",", "key", ",", "value", ")", "cert", ".", "set_issuer", "(", "subj", ")", "cert", ".", "set_serial_number", "(", "1000", ")", "cert", ".", "gmtime_adj_notBefore", "(", "0", ")", "cert", ".", "gmtime_adj_notAfter", "(", "_CERTIFICATE_EXPIRE", ")", "cert", ".", "set_pubkey", "(", "keypair", ")", "cert", ".", "sign", "(", "keypair", ",", "'sha1'", ")", "with", "open", "(", "_CERTIFICATE_FILE", ",", "'wt'", ")", "as", "cfile", ":", "cfile", ".", "write", "(", "crypto", ".", "dump_certificate", "(", "crypto", ".", "FILETYPE_PEM", 
",", "cert", ")", ")", "print", "(", "\"Created SSL certificate in '{}'.\"", ".", "format", "(", "_CERTIFICATE_FILE", ")", ")", "except", "Exception", "as", "err", ":", "print", "(", "NO_AUTOCERT", ".", "format", "(", "err", "=", "err", ")", ")", "return", "False", "return", "True" ]
[ 72, 0 ]
[ 128, 15 ]
python
en
['en', 'error', 'th']
False
getSSLContext
()
This is called by the portal when creating the SSL context server-side. Returns: ssl_context (tuple): A key and certificate that is either existing previously or created on the fly.
This is called by the portal when creating the SSL context server-side.
def getSSLContext(): """ This is called by the portal when creating the SSL context server-side. Returns: ssl_context (tuple): A key and certificate that is either existing previously or created on the fly. """ if verify_or_create_SSL_key_and_cert(_PRIVATE_KEY_FILE, _CERTIFICATE_FILE): return twisted_ssl.DefaultOpenSSLContextFactory(_PRIVATE_KEY_FILE, _CERTIFICATE_FILE) else: return None
[ "def", "getSSLContext", "(", ")", ":", "if", "verify_or_create_SSL_key_and_cert", "(", "_PRIVATE_KEY_FILE", ",", "_CERTIFICATE_FILE", ")", ":", "return", "twisted_ssl", ".", "DefaultOpenSSLContextFactory", "(", "_PRIVATE_KEY_FILE", ",", "_CERTIFICATE_FILE", ")", "else", ":", "return", "None" ]
[ 131, 0 ]
[ 145, 19 ]
python
en
['en', 'error', 'th']
False
iso_to_plotly_time_string
(iso_string)
Remove timezone info and replace 'T' delimeter with ' ' (ws).
Remove timezone info and replace 'T' delimeter with ' ' (ws).
def iso_to_plotly_time_string(iso_string): """Remove timezone info and replace 'T' delimeter with ' ' (ws).""" # make sure we don't send timezone info to plotly if (iso_string.split("-")[:3] == "00:00") or (iso_string.split("+")[0] == "00:00"): raise Exception( "Plotly won't accept timestrings with timezone info.\n" "All timestrings are assumed to be in UTC." ) iso_string = iso_string.replace("-00:00", "").replace("+00:00", "") if iso_string.endswith("T00:00:00"): return iso_string.replace("T00:00:00", "") else: return iso_string.replace("T", " ")
[ "def", "iso_to_plotly_time_string", "(", "iso_string", ")", ":", "# make sure we don't send timezone info to plotly", "if", "(", "iso_string", ".", "split", "(", "\"-\"", ")", "[", ":", "3", "]", "==", "\"00:00\"", ")", "or", "(", "iso_string", ".", "split", "(", "\"+\"", ")", "[", "0", "]", "==", "\"00:00\"", ")", ":", "raise", "Exception", "(", "\"Plotly won't accept timestrings with timezone info.\\n\"", "\"All timestrings are assumed to be in UTC.\"", ")", "iso_string", "=", "iso_string", ".", "replace", "(", "\"-00:00\"", ",", "\"\"", ")", ".", "replace", "(", "\"+00:00\"", ",", "\"\"", ")", "if", "iso_string", ".", "endswith", "(", "\"T00:00:00\"", ")", ":", "return", "iso_string", ".", "replace", "(", "\"T00:00:00\"", ",", "\"\"", ")", "else", ":", "return", "iso_string", ".", "replace", "(", "\"T\"", ",", "\" \"", ")" ]
[ 210, 0 ]
[ 224, 43 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.coerce_to_strict
(self, const)
This is used to ultimately *encode* into strict JSON, see `encode`
This is used to ultimately *encode* into strict JSON, see `encode`
def coerce_to_strict(self, const): """ This is used to ultimately *encode* into strict JSON, see `encode` """ # before python 2.7, 'true', 'false', 'null', were include here. if const in ("Infinity", "-Infinity", "NaN"): return None else: return const
[ "def", "coerce_to_strict", "(", "self", ",", "const", ")", ":", "# before python 2.7, 'true', 'false', 'null', were include here.", "if", "const", "in", "(", "\"Infinity\"", ",", "\"-Infinity\"", ",", "\"NaN\"", ")", ":", "return", "None", "else", ":", "return", "const" ]
[ 24, 4 ]
[ 33, 24 ]
python
en
['en', 'error', 'th']
False
PlotlyJSONEncoder.encode
(self, o)
Load and then dump the result using parse_constant kwarg Note that setting invalid separators will cause a failure at this step.
Load and then dump the result using parse_constant kwarg
def encode(self, o): """ Load and then dump the result using parse_constant kwarg Note that setting invalid separators will cause a failure at this step. """ # this will raise errors in a normal-expected way encoded_o = super(PlotlyJSONEncoder, self).encode(o) # now: # 1. `loads` to switch Infinity, -Infinity, NaN to None # 2. `dumps` again so you get 'null' instead of extended JSON try: new_o = _json.loads(encoded_o, parse_constant=self.coerce_to_strict) except ValueError: # invalid separators will fail here. raise a helpful exception raise ValueError( "Encoding into strict JSON failed. Did you set the separators " "valid JSON separators?" ) else: return _json.dumps( new_o, sort_keys=self.sort_keys, indent=self.indent, separators=(self.item_separator, self.key_separator), )
[ "def", "encode", "(", "self", ",", "o", ")", ":", "# this will raise errors in a normal-expected way", "encoded_o", "=", "super", "(", "PlotlyJSONEncoder", ",", "self", ")", ".", "encode", "(", "o", ")", "# now:", "# 1. `loads` to switch Infinity, -Infinity, NaN to None", "# 2. `dumps` again so you get 'null' instead of extended JSON", "try", ":", "new_o", "=", "_json", ".", "loads", "(", "encoded_o", ",", "parse_constant", "=", "self", ".", "coerce_to_strict", ")", "except", "ValueError", ":", "# invalid separators will fail here. raise a helpful exception", "raise", "ValueError", "(", "\"Encoding into strict JSON failed. Did you set the separators \"", "\"valid JSON separators?\"", ")", "else", ":", "return", "_json", ".", "dumps", "(", "new_o", ",", "sort_keys", "=", "self", ".", "sort_keys", ",", "indent", "=", "self", ".", "indent", ",", "separators", "=", "(", "self", ".", "item_separator", ",", "self", ".", "key_separator", ")", ",", ")" ]
[ 35, 4 ]
[ 64, 13 ]
python
en
['en', 'error', 'th']
False
PlotlyJSONEncoder.default
(self, obj)
Accept an object (of unknown type) and try to encode with priority: 1. builtin: user-defined objects 2. sage: sage math cloud 3. pandas: dataframes/series 4. numpy: ndarrays 5. datetime: time/datetime objects Each method throws a NotEncoded exception if it fails. The default method will only get hit if the object is not a type that is naturally encoded by json: Normal objects: dict object list, tuple array str, unicode string int, long, float number True true False false None null Extended objects: float('nan') 'NaN' float('infinity') 'Infinity' float('-infinity') '-Infinity' Therefore, we only anticipate either unknown iterables or values here.
Accept an object (of unknown type) and try to encode with priority: 1. builtin: user-defined objects 2. sage: sage math cloud 3. pandas: dataframes/series 4. numpy: ndarrays 5. datetime: time/datetime objects
def default(self, obj): """ Accept an object (of unknown type) and try to encode with priority: 1. builtin: user-defined objects 2. sage: sage math cloud 3. pandas: dataframes/series 4. numpy: ndarrays 5. datetime: time/datetime objects Each method throws a NotEncoded exception if it fails. The default method will only get hit if the object is not a type that is naturally encoded by json: Normal objects: dict object list, tuple array str, unicode string int, long, float number True true False false None null Extended objects: float('nan') 'NaN' float('infinity') 'Infinity' float('-infinity') '-Infinity' Therefore, we only anticipate either unknown iterables or values here. """ # TODO: The ordering if these methods is *very* important. Is this OK? encoding_methods = ( self.encode_as_plotly, self.encode_as_sage, self.encode_as_numpy, self.encode_as_pandas, self.encode_as_datetime, self.encode_as_date, self.encode_as_list, # because some values have `tolist` do last. self.encode_as_decimal, self.encode_as_pil, ) for encoding_method in encoding_methods: try: return encoding_method(obj) except NotEncodable: pass return _json.JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "# TODO: The ordering if these methods is *very* important. Is this OK?", "encoding_methods", "=", "(", "self", ".", "encode_as_plotly", ",", "self", ".", "encode_as_sage", ",", "self", ".", "encode_as_numpy", ",", "self", ".", "encode_as_pandas", ",", "self", ".", "encode_as_datetime", ",", "self", ".", "encode_as_date", ",", "self", ".", "encode_as_list", ",", "# because some values have `tolist` do last.", "self", ".", "encode_as_decimal", ",", "self", ".", "encode_as_pil", ",", ")", "for", "encoding_method", "in", "encoding_methods", ":", "try", ":", "return", "encoding_method", "(", "obj", ")", "except", "NotEncodable", ":", "pass", "return", "_json", ".", "JSONEncoder", ".", "default", "(", "self", ",", "obj", ")" ]
[ 66, 4 ]
[ 114, 51 ]
python
en
['en', 'error', 'th']
False
PlotlyJSONEncoder.encode_as_plotly
(obj)
Attempt to use a builtin `to_plotly_json` method.
Attempt to use a builtin `to_plotly_json` method.
def encode_as_plotly(obj): """Attempt to use a builtin `to_plotly_json` method.""" try: return obj.to_plotly_json() except AttributeError: raise NotEncodable
[ "def", "encode_as_plotly", "(", "obj", ")", ":", "try", ":", "return", "obj", ".", "to_plotly_json", "(", ")", "except", "AttributeError", ":", "raise", "NotEncodable" ]
[ 117, 4 ]
[ 122, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_list
(obj)
Attempt to use `tolist` method to convert to normal Python list.
Attempt to use `tolist` method to convert to normal Python list.
def encode_as_list(obj): """Attempt to use `tolist` method to convert to normal Python list.""" if hasattr(obj, "tolist"): return obj.tolist() else: raise NotEncodable
[ "def", "encode_as_list", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "\"tolist\"", ")", ":", "return", "obj", ".", "tolist", "(", ")", "else", ":", "raise", "NotEncodable" ]
[ 125, 4 ]
[ 130, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_sage
(obj)
Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints
Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints
def encode_as_sage(obj): """Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints""" sage_all = get_module("sage.all") if not sage_all: raise NotEncodable if obj in sage_all.RR: return float(obj) elif obj in sage_all.ZZ: return int(obj) else: raise NotEncodable
[ "def", "encode_as_sage", "(", "obj", ")", ":", "sage_all", "=", "get_module", "(", "\"sage.all\"", ")", "if", "not", "sage_all", ":", "raise", "NotEncodable", "if", "obj", "in", "sage_all", ".", "RR", ":", "return", "float", "(", "obj", ")", "elif", "obj", "in", "sage_all", ".", "ZZ", ":", "return", "int", "(", "obj", ")", "else", ":", "raise", "NotEncodable" ]
[ 133, 4 ]
[ 144, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_pandas
(obj)
Attempt to convert pandas.NaT
Attempt to convert pandas.NaT
def encode_as_pandas(obj): """Attempt to convert pandas.NaT""" pandas = get_module("pandas", should_load=False) if not pandas: raise NotEncodable if obj is pandas.NaT: return None else: raise NotEncodable
[ "def", "encode_as_pandas", "(", "obj", ")", ":", "pandas", "=", "get_module", "(", "\"pandas\"", ",", "should_load", "=", "False", ")", "if", "not", "pandas", ":", "raise", "NotEncodable", "if", "obj", "is", "pandas", ".", "NaT", ":", "return", "None", "else", ":", "raise", "NotEncodable" ]
[ 147, 4 ]
[ 156, 30 ]
python
en
['en', 'lb', 'en']
True
PlotlyJSONEncoder.encode_as_numpy
(obj)
Attempt to convert numpy.ma.core.masked
Attempt to convert numpy.ma.core.masked
def encode_as_numpy(obj): """Attempt to convert numpy.ma.core.masked""" numpy = get_module("numpy", should_load=False) if not numpy: raise NotEncodable if obj is numpy.ma.core.masked: return float("nan") else: raise NotEncodable
[ "def", "encode_as_numpy", "(", "obj", ")", ":", "numpy", "=", "get_module", "(", "\"numpy\"", ",", "should_load", "=", "False", ")", "if", "not", "numpy", ":", "raise", "NotEncodable", "if", "obj", "is", "numpy", ".", "ma", ".", "core", ".", "masked", ":", "return", "float", "(", "\"nan\"", ")", "else", ":", "raise", "NotEncodable" ]
[ 159, 4 ]
[ 168, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_datetime
(obj)
Convert datetime objects to iso-format strings
Convert datetime objects to iso-format strings
def encode_as_datetime(obj): """Convert datetime objects to iso-format strings""" try: return obj.isoformat() except AttributeError: raise NotEncodable
[ "def", "encode_as_datetime", "(", "obj", ")", ":", "try", ":", "return", "obj", ".", "isoformat", "(", ")", "except", "AttributeError", ":", "raise", "NotEncodable" ]
[ 171, 4 ]
[ 176, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_date
(obj)
Attempt to convert to utc-iso time string using date methods.
Attempt to convert to utc-iso time string using date methods.
def encode_as_date(obj): """Attempt to convert to utc-iso time string using date methods.""" try: time_string = obj.isoformat() except AttributeError: raise NotEncodable else: return iso_to_plotly_time_string(time_string)
[ "def", "encode_as_date", "(", "obj", ")", ":", "try", ":", "time_string", "=", "obj", ".", "isoformat", "(", ")", "except", "AttributeError", ":", "raise", "NotEncodable", "else", ":", "return", "iso_to_plotly_time_string", "(", "time_string", ")" ]
[ 179, 4 ]
[ 186, 57 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_decimal
(obj)
Attempt to encode decimal by converting it to float
Attempt to encode decimal by converting it to float
def encode_as_decimal(obj): """Attempt to encode decimal by converting it to float""" if isinstance(obj, decimal.Decimal): return float(obj) else: raise NotEncodable
[ "def", "encode_as_decimal", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "decimal", ".", "Decimal", ")", ":", "return", "float", "(", "obj", ")", "else", ":", "raise", "NotEncodable" ]
[ 189, 4 ]
[ 194, 30 ]
python
en
['en', 'en', 'en']
True
PlotlyJSONEncoder.encode_as_pil
(obj)
Attempt to convert PIL.Image.Image to base64 data uri
Attempt to convert PIL.Image.Image to base64 data uri
def encode_as_pil(obj): """Attempt to convert PIL.Image.Image to base64 data uri""" image = get_module("PIL.Image") if image is not None and isinstance(obj, image.Image): return ImageUriValidator.pil_image_to_uri(obj) else: raise NotEncodable
[ "def", "encode_as_pil", "(", "obj", ")", ":", "image", "=", "get_module", "(", "\"PIL.Image\"", ")", "if", "image", "is", "not", "None", "and", "isinstance", "(", "obj", ",", "image", ".", "Image", ")", ":", "return", "ImageUriValidator", ".", "pil_image_to_uri", "(", "obj", ")", "else", ":", "raise", "NotEncodable" ]
[ 197, 4 ]
[ 203, 30 ]
python
en
['en', 'en', 'en']
True
_get_config_directory
()
Find the predefined detector config directory.
Find the predefined detector config directory.
def _get_config_directory(): """Find the predefined detector config directory.""" try: # Assume we are running in the source mmdetection3d repo repo_dpath = dirname(dirname(dirname(dirname(__file__)))) except NameError: # For IPython development when this __file__ is not defined import mmdet3d repo_dpath = dirname(dirname(mmdet3d.__file__)) config_dpath = join(repo_dpath, 'configs') if not exists(config_dpath): raise Exception('Cannot find config path') return config_dpath
[ "def", "_get_config_directory", "(", ")", ":", "try", ":", "# Assume we are running in the source mmdetection3d repo", "repo_dpath", "=", "dirname", "(", "dirname", "(", "dirname", "(", "dirname", "(", "__file__", ")", ")", ")", ")", "except", "NameError", ":", "# For IPython development when this __file__ is not defined", "import", "mmdet3d", "repo_dpath", "=", "dirname", "(", "dirname", "(", "mmdet3d", ".", "__file__", ")", ")", "config_dpath", "=", "join", "(", "repo_dpath", ",", "'configs'", ")", "if", "not", "exists", "(", "config_dpath", ")", ":", "raise", "Exception", "(", "'Cannot find config path'", ")", "return", "config_dpath" ]
[ 21, 0 ]
[ 33, 23 ]
python
en
['en', 'en', 'en']
True
_get_config_module
(fname)
Load a configuration as a python module.
Load a configuration as a python module.
def _get_config_module(fname): """Load a configuration as a python module.""" from mmcv import Config config_dpath = _get_config_directory() config_fpath = join(config_dpath, fname) config_mod = Config.fromfile(config_fpath) return config_mod
[ "def", "_get_config_module", "(", "fname", ")", ":", "from", "mmcv", "import", "Config", "config_dpath", "=", "_get_config_directory", "(", ")", "config_fpath", "=", "join", "(", "config_dpath", ",", "fname", ")", "config_mod", "=", "Config", ".", "fromfile", "(", "config_fpath", ")", "return", "config_mod" ]
[ 36, 0 ]
[ 42, 21 ]
python
en
['en', 'fr', 'en']
True
_get_head_cfg
(fname)
Grab configs necessary to create a bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a bbox_head.
def _get_head_cfg(fname): """Grab configs necessary to create a bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg)) bbox_head = model.bbox_head bbox_head.update(train_cfg=train_cfg) bbox_head.update(test_cfg=test_cfg) return bbox_head
[ "def", "_get_head_cfg", "(", "fname", ")", ":", "import", "mmcv", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "train_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "train_cfg", ")", ")", "test_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "test_cfg", ")", ")", "bbox_head", "=", "model", ".", "bbox_head", "bbox_head", ".", "update", "(", "train_cfg", "=", "train_cfg", ")", "bbox_head", ".", "update", "(", "test_cfg", "=", "test_cfg", ")", "return", "bbox_head" ]
[ 45, 0 ]
[ 60, 20 ]
python
en
['en', 'en', 'en']
True
_get_rpn_head_cfg
(fname)
Grab configs necessary to create a rpn_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a rpn_head.
def _get_rpn_head_cfg(fname): """Grab configs necessary to create a rpn_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg)) rpn_head = model.rpn_head rpn_head.update(train_cfg=train_cfg.rpn) rpn_head.update(test_cfg=test_cfg.rpn) return rpn_head, train_cfg.rpn_proposal
[ "def", "_get_rpn_head_cfg", "(", "fname", ")", ":", "import", "mmcv", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "train_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "train_cfg", ")", ")", "test_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "test_cfg", ")", ")", "rpn_head", "=", "model", ".", "rpn_head", "rpn_head", ".", "update", "(", "train_cfg", "=", "train_cfg", ".", "rpn", ")", "rpn_head", ".", "update", "(", "test_cfg", "=", "test_cfg", ".", "rpn", ")", "return", "rpn_head", ",", "train_cfg", ".", "rpn_proposal" ]
[ 63, 0 ]
[ 78, 43 ]
python
en
['en', 'en', 'en']
True
_get_roi_head_cfg
(fname)
Grab configs necessary to create a roi_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a roi_head.
def _get_roi_head_cfg(fname): """Grab configs necessary to create a roi_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg)) roi_head = model.roi_head roi_head.update(train_cfg=train_cfg.rcnn) roi_head.update(test_cfg=test_cfg.rcnn) return roi_head
[ "def", "_get_roi_head_cfg", "(", "fname", ")", ":", "import", "mmcv", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "train_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "train_cfg", ")", ")", "test_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "test_cfg", ")", ")", "roi_head", "=", "model", ".", "roi_head", "roi_head", ".", "update", "(", "train_cfg", "=", "train_cfg", ".", "rcnn", ")", "roi_head", ".", "update", "(", "test_cfg", "=", "test_cfg", ".", "rcnn", ")", "return", "roi_head" ]
[ 81, 0 ]
[ 96, 19 ]
python
en
['en', 'gd', 'en']
True
_get_pts_bbox_head_cfg
(fname)
Grab configs necessary to create a pts_bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a pts_bbox_head.
def _get_pts_bbox_head_cfg(fname): """Grab configs necessary to create a pts_bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg.pts)) test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg.pts)) pts_bbox_head = model.pts_bbox_head pts_bbox_head.update(train_cfg=train_cfg) pts_bbox_head.update(test_cfg=test_cfg) return pts_bbox_head
[ "def", "_get_pts_bbox_head_cfg", "(", "fname", ")", ":", "import", "mmcv", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "train_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "train_cfg", ".", "pts", ")", ")", "test_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "test_cfg", ".", "pts", ")", ")", "pts_bbox_head", "=", "model", ".", "pts_bbox_head", "pts_bbox_head", ".", "update", "(", "train_cfg", "=", "train_cfg", ")", "pts_bbox_head", ".", "update", "(", "test_cfg", "=", "test_cfg", ")", "return", "pts_bbox_head" ]
[ 99, 0 ]
[ 114, 24 ]
python
en
['en', 'en', 'en']
True
_get_vote_head_cfg
(fname)
Grab configs necessary to create a vote_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a vote_head.
def _get_vote_head_cfg(fname): """Grab configs necessary to create a vote_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ import mmcv config = _get_config_module(fname) model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg)) vote_head = model.bbox_head vote_head.update(train_cfg=train_cfg) vote_head.update(test_cfg=test_cfg) return vote_head
[ "def", "_get_vote_head_cfg", "(", "fname", ")", ":", "import", "mmcv", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "train_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "train_cfg", ")", ")", "test_cfg", "=", "mmcv", ".", "Config", "(", "copy", ".", "deepcopy", "(", "config", ".", "model", ".", "test_cfg", ")", ")", "vote_head", "=", "model", ".", "bbox_head", "vote_head", ".", "update", "(", "train_cfg", "=", "train_cfg", ")", "vote_head", ".", "update", "(", "test_cfg", "=", "test_cfg", ")", "return", "vote_head" ]
[ 117, 0 ]
[ 132, 20 ]
python
en
['en', 'it', 'en']
True
_get_parta2_bbox_head_cfg
(fname)
Grab configs necessary to create a parta2_bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests.
Grab configs necessary to create a parta2_bbox_head.
def _get_parta2_bbox_head_cfg(fname): """Grab configs necessary to create a parta2_bbox_head. These are deep copied to allow for safe modification of parameters without influencing other tests. """ config = _get_config_module(fname) model = copy.deepcopy(config.model) vote_head = model.roi_head.bbox_head return vote_head
[ "def", "_get_parta2_bbox_head_cfg", "(", "fname", ")", ":", "config", "=", "_get_config_module", "(", "fname", ")", "model", "=", "copy", ".", "deepcopy", "(", "config", ".", "model", ")", "vote_head", "=", "model", ".", "roi_head", ".", "bbox_head", "return", "vote_head" ]
[ 135, 0 ]
[ 145, 20 ]
python
en
['en', 'it', 'en']
True
inter
(rbbox1, rbbox2)
Compute intersection of two rotated boxes. Args: rbox1 (np.ndarray, shape=[5]): Rotated 2d box. rbox2 (np.ndarray, shape=[5]): Rotated 2d box. Returns: float: Intersection of two rotated boxes.
Compute intersection of two rotated boxes.
def inter(rbbox1, rbbox2): """Compute intersection of two rotated boxes. Args: rbox1 (np.ndarray, shape=[5]): Rotated 2d box. rbox2 (np.ndarray, shape=[5]): Rotated 2d box. Returns: float: Intersection of two rotated boxes. """ corners1 = cuda.local.array((8, ), dtype=numba.float32) corners2 = cuda.local.array((8, ), dtype=numba.float32) intersection_corners = cuda.local.array((16, ), dtype=numba.float32) rbbox_to_corners(corners1, rbbox1) rbbox_to_corners(corners2, rbbox2) num_intersection = quadrilateral_intersection(corners1, corners2, intersection_corners) sort_vertex_in_convex_polygon(intersection_corners, num_intersection) # print(intersection_corners.reshape([-1, 2])[:num_intersection]) return area(intersection_corners, num_intersection)
[ "def", "inter", "(", "rbbox1", ",", "rbbox2", ")", ":", "corners1", "=", "cuda", ".", "local", ".", "array", "(", "(", "8", ",", ")", ",", "dtype", "=", "numba", ".", "float32", ")", "corners2", "=", "cuda", ".", "local", ".", "array", "(", "(", "8", ",", ")", ",", "dtype", "=", "numba", ".", "float32", ")", "intersection_corners", "=", "cuda", ".", "local", ".", "array", "(", "(", "16", ",", ")", ",", "dtype", "=", "numba", ".", "float32", ")", "rbbox_to_corners", "(", "corners1", ",", "rbbox1", ")", "rbbox_to_corners", "(", "corners2", ",", "rbbox2", ")", "num_intersection", "=", "quadrilateral_intersection", "(", "corners1", ",", "corners2", ",", "intersection_corners", ")", "sort_vertex_in_convex_polygon", "(", "intersection_corners", ",", "num_intersection", ")", "# print(intersection_corners.reshape([-1, 2])[:num_intersection])", "return", "area", "(", "intersection_corners", ",", "num_intersection", ")" ]
[ 230, 0 ]
[ 252, 55 ]
python
en
['en', 'en', 'en']
True
devRotateIoUEval
(rbox1, rbox2, criterion=-1)
Compute rotated iou on device. Args: rbox1 (np.ndarray, shape=[5]): Rotated 2d box. rbox2 (np.ndarray, shape=[5]): Rotated 2d box. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`. Returns: float: iou between two input boxes.
Compute rotated iou on device.
def devRotateIoUEval(rbox1, rbox2, criterion=-1): """Compute rotated iou on device. Args: rbox1 (np.ndarray, shape=[5]): Rotated 2d box. rbox2 (np.ndarray, shape=[5]): Rotated 2d box. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`. Returns: float: iou between two input boxes. """ area1 = rbox1[2] * rbox1[3] area2 = rbox2[2] * rbox2[3] area_inter = inter(rbox1, rbox2) if criterion == -1: return area_inter / (area1 + area2 - area_inter) elif criterion == 0: return area_inter / area1 elif criterion == 1: return area_inter / area2 else: return area_inter
[ "def", "devRotateIoUEval", "(", "rbox1", ",", "rbox2", ",", "criterion", "=", "-", "1", ")", ":", "area1", "=", "rbox1", "[", "2", "]", "*", "rbox1", "[", "3", "]", "area2", "=", "rbox2", "[", "2", "]", "*", "rbox2", "[", "3", "]", "area_inter", "=", "inter", "(", "rbox1", ",", "rbox2", ")", "if", "criterion", "==", "-", "1", ":", "return", "area_inter", "/", "(", "area1", "+", "area2", "-", "area_inter", ")", "elif", "criterion", "==", "0", ":", "return", "area_inter", "/", "area1", "elif", "criterion", "==", "1", ":", "return", "area_inter", "/", "area2", "else", ":", "return", "area_inter" ]
[ 256, 0 ]
[ 280, 25 ]
python
en
['en', 'en', 'en']
True
rotate_iou_kernel_eval
(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1)
Kernel of computing rotated iou. Args: N (int): The number of boxes. K (int): The number of query boxes. dev_boxes (np.ndarray): Boxes on device. dev_query_boxes (np.ndarray): Query boxes on device. dev_iou (np.ndarray): Computed iou to return. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`.
Kernel of computing rotated iou.
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1): """Kernel of computing rotated iou. Args: N (int): The number of boxes. K (int): The number of query boxes. dev_boxes (np.ndarray): Boxes on device. dev_query_boxes (np.ndarray): Query boxes on device. dev_iou (np.ndarray): Computed iou to return. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`. """ threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.x col_start = cuda.blockIdx.y tx = cuda.threadIdx.x row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) dev_query_box_idx = threadsPerBlock * col_start + tx dev_box_idx = threadsPerBlock * row_start + tx if (tx < col_size): block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] if (tx < row_size): block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] cuda.syncthreads() if tx < row_size: for i in range(col_size): offset = ( row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i) dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], block_boxes[tx * 5:tx * 5 + 5], criterion)
[ "def", "rotate_iou_kernel_eval", "(", "N", ",", "K", ",", "dev_boxes", ",", "dev_query_boxes", ",", "dev_iou", ",", "criterion", "=", "-", "1", ")", ":", "threadsPerBlock", "=", "8", "*", "8", "row_start", "=", "cuda", ".", "blockIdx", ".", "x", "col_start", "=", "cuda", ".", "blockIdx", ".", "y", "tx", "=", "cuda", ".", "threadIdx", ".", "x", "row_size", "=", "min", "(", "N", "-", "row_start", "*", "threadsPerBlock", ",", "threadsPerBlock", ")", "col_size", "=", "min", "(", "K", "-", "col_start", "*", "threadsPerBlock", ",", "threadsPerBlock", ")", "block_boxes", "=", "cuda", ".", "shared", ".", "array", "(", "shape", "=", "(", "64", "*", "5", ",", ")", ",", "dtype", "=", "numba", ".", "float32", ")", "block_qboxes", "=", "cuda", ".", "shared", ".", "array", "(", "shape", "=", "(", "64", "*", "5", ",", ")", ",", "dtype", "=", "numba", ".", "float32", ")", "dev_query_box_idx", "=", "threadsPerBlock", "*", "col_start", "+", "tx", "dev_box_idx", "=", "threadsPerBlock", "*", "row_start", "+", "tx", "if", "(", "tx", "<", "col_size", ")", ":", "block_qboxes", "[", "tx", "*", "5", "+", "0", "]", "=", "dev_query_boxes", "[", "dev_query_box_idx", "*", "5", "+", "0", "]", "block_qboxes", "[", "tx", "*", "5", "+", "1", "]", "=", "dev_query_boxes", "[", "dev_query_box_idx", "*", "5", "+", "1", "]", "block_qboxes", "[", "tx", "*", "5", "+", "2", "]", "=", "dev_query_boxes", "[", "dev_query_box_idx", "*", "5", "+", "2", "]", "block_qboxes", "[", "tx", "*", "5", "+", "3", "]", "=", "dev_query_boxes", "[", "dev_query_box_idx", "*", "5", "+", "3", "]", "block_qboxes", "[", "tx", "*", "5", "+", "4", "]", "=", "dev_query_boxes", "[", "dev_query_box_idx", "*", "5", "+", "4", "]", "if", "(", "tx", "<", "row_size", ")", ":", "block_boxes", "[", "tx", "*", "5", "+", "0", "]", "=", "dev_boxes", "[", "dev_box_idx", "*", "5", "+", "0", "]", "block_boxes", "[", "tx", "*", "5", "+", "1", "]", "=", "dev_boxes", "[", "dev_box_idx", "*", "5", "+", "1", "]", "block_boxes", "[", "tx", "*", "5", 
"+", "2", "]", "=", "dev_boxes", "[", "dev_box_idx", "*", "5", "+", "2", "]", "block_boxes", "[", "tx", "*", "5", "+", "3", "]", "=", "dev_boxes", "[", "dev_box_idx", "*", "5", "+", "3", "]", "block_boxes", "[", "tx", "*", "5", "+", "4", "]", "=", "dev_boxes", "[", "dev_box_idx", "*", "5", "+", "4", "]", "cuda", ".", "syncthreads", "(", ")", "if", "tx", "<", "row_size", ":", "for", "i", "in", "range", "(", "col_size", ")", ":", "offset", "=", "(", "row_start", "*", "threadsPerBlock", "*", "K", "+", "col_start", "*", "threadsPerBlock", "+", "tx", "*", "K", "+", "i", ")", "dev_iou", "[", "offset", "]", "=", "devRotateIoUEval", "(", "block_qboxes", "[", "i", "*", "5", ":", "i", "*", "5", "+", "5", "]", ",", "block_boxes", "[", "tx", "*", "5", ":", "tx", "*", "5", "+", "5", "]", ",", "criterion", ")" ]
[ 286, 0 ]
[ 336, 57 ]
python
en
['en', 'mi', 'en']
True
rotate_iou_gpu_eval
(boxes, query_boxes, criterion=-1, device_id=0)
Rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). Args: boxes (torch.Tensor): rbboxes. format: centers, dims, angles(clockwise when positive) with the shape of [N, 5]. query_boxes (float tensor: [K, 5]): rbboxes to compute iou with boxes. device_id (int, optional): Defaults to 0. Device to use. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`. Returns: np.ndarray: IoU results.
Rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): """Rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). Args: boxes (torch.Tensor): rbboxes. format: centers, dims, angles(clockwise when positive) with the shape of [N, 5]. query_boxes (float tensor: [K, 5]): rbboxes to compute iou with boxes. device_id (int, optional): Defaults to 0. Device to use. criterion (int, optional): Indicate different type of iou. -1 indicate `area_inter / (area1 + area2 - area_inter)`, 0 indicate `area_inter / area1`, 1 indicate `area_inter / area2`. Returns: np.ndarray: IoU results. """ boxes = boxes.astype(np.float32) query_boxes = query_boxes.astype(np.float32) N = boxes.shape[0] K = query_boxes.shape[0] iou = np.zeros((N, K), dtype=np.float32) if N == 0 or K == 0: return iou threadsPerBlock = 8 * 8 cuda.select_device(device_id) blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) iou_dev = cuda.to_device(iou.reshape([-1]), stream) rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](N, K, boxes_dev, query_boxes_dev, iou_dev, criterion) iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) return iou.astype(boxes.dtype)
[ "def", "rotate_iou_gpu_eval", "(", "boxes", ",", "query_boxes", ",", "criterion", "=", "-", "1", ",", "device_id", "=", "0", ")", ":", "boxes", "=", "boxes", ".", "astype", "(", "np", ".", "float32", ")", "query_boxes", "=", "query_boxes", ".", "astype", "(", "np", ".", "float32", ")", "N", "=", "boxes", ".", "shape", "[", "0", "]", "K", "=", "query_boxes", ".", "shape", "[", "0", "]", "iou", "=", "np", ".", "zeros", "(", "(", "N", ",", "K", ")", ",", "dtype", "=", "np", ".", "float32", ")", "if", "N", "==", "0", "or", "K", "==", "0", ":", "return", "iou", "threadsPerBlock", "=", "8", "*", "8", "cuda", ".", "select_device", "(", "device_id", ")", "blockspergrid", "=", "(", "div_up", "(", "N", ",", "threadsPerBlock", ")", ",", "div_up", "(", "K", ",", "threadsPerBlock", ")", ")", "stream", "=", "cuda", ".", "stream", "(", ")", "with", "stream", ".", "auto_synchronize", "(", ")", ":", "boxes_dev", "=", "cuda", ".", "to_device", "(", "boxes", ".", "reshape", "(", "[", "-", "1", "]", ")", ",", "stream", ")", "query_boxes_dev", "=", "cuda", ".", "to_device", "(", "query_boxes", ".", "reshape", "(", "[", "-", "1", "]", ")", ",", "stream", ")", "iou_dev", "=", "cuda", ".", "to_device", "(", "iou", ".", "reshape", "(", "[", "-", "1", "]", ")", ",", "stream", ")", "rotate_iou_kernel_eval", "[", "blockspergrid", ",", "threadsPerBlock", ",", "stream", "]", "(", "N", ",", "K", ",", "boxes_dev", ",", "query_boxes_dev", ",", "iou_dev", ",", "criterion", ")", "iou_dev", ".", "copy_to_host", "(", "iou", ".", "reshape", "(", "[", "-", "1", "]", ")", ",", "stream", "=", "stream", ")", "return", "iou", ".", "astype", "(", "boxes", ".", "dtype", ")" ]
[ 339, 0 ]
[ 377, 34 ]
python
en
['en', 'jv', 'en']
True
Ants.ant
(self, ctx, genus: str, species: str=None, subspecies: str=None)
Bring up some simple info on an ant genus or species.
Bring up some simple info on an ant genus or species.
async def ant(self, ctx, genus: str, species: str=None, subspecies: str=None): """Bring up some simple info on an ant genus or species.""" genus = genus.lower().capitalize() if species is not None: species = species.lower() if subspecies is not None: subspecies = subspecies.lower() if species is None: antwiki_url = f'http://antwiki.org/wiki/{genus}' antweb_url = f'http://antweb.org/description.do?genus={genus}&rank=genus' elif subspecies is None: antwiki_url = f'http://antwiki.org/wiki/{genus}_{species}' antweb_url = f'http://antweb.org/description.do?genus={genus}&species={species}&rank=species' else: antwiki_url = f'http://antwiki.org/wiki/{genus}_{species}_{subspecies}' antweb_url = f'http://antweb.org/description.do?genus={genus}&species={species}&subspecies={subspecies}&rank=subspecies' async with ctx.channel.typing(): async with self.bot.session.get(antwiki_url) as resp: text = await resp.read() antwiki_soup = BeautifulSoup(text, 'html.parser') error = antwiki_soup.find('table', attrs={'class': 'infobox biota'}) if error is not None: name = antwiki_soup.find('h1', id='firstHeading').span.i.text image = antwiki_soup.find('table', attrs={'class': 'infobox biota'}).find('img') image = image['src'] if image is not None else None image = f'http://antwiki.org{image}' if image is not None else '' subfamily = antwiki_soup.find('span', attrs={'class': 'subfamily'}) subfamily = subfamily.a.text if subfamily is not None else '' tribe = antwiki_soup.find('span', attrs={'class': 'tribe'}) tribe = tribe.a.text if tribe is not None else '' embed = discord.Embed(colour=discord.Colour.green()) embed.title = name embed.set_thumbnail(url=image) embed.description = f'**Subfamily:** {subfamily}\n**Tribe:** {tribe}' embed.add_field(name='AntWiki', value=antwiki_url) embed.add_field(name='AntWeb', value=antweb_url) await ctx.send(embed=embed) else: await ctx.send('Not found.')
[ "async", "def", "ant", "(", "self", ",", "ctx", ",", "genus", ":", "str", ",", "species", ":", "str", "=", "None", ",", "subspecies", ":", "str", "=", "None", ")", ":", "genus", "=", "genus", ".", "lower", "(", ")", ".", "capitalize", "(", ")", "if", "species", "is", "not", "None", ":", "species", "=", "species", ".", "lower", "(", ")", "if", "subspecies", "is", "not", "None", ":", "subspecies", "=", "subspecies", ".", "lower", "(", ")", "if", "species", "is", "None", ":", "antwiki_url", "=", "f'http://antwiki.org/wiki/{genus}'", "antweb_url", "=", "f'http://antweb.org/description.do?genus={genus}&rank=genus'", "elif", "subspecies", "is", "None", ":", "antwiki_url", "=", "f'http://antwiki.org/wiki/{genus}_{species}'", "antweb_url", "=", "f'http://antweb.org/description.do?genus={genus}&species={species}&rank=species'", "else", ":", "antwiki_url", "=", "f'http://antwiki.org/wiki/{genus}_{species}_{subspecies}'", "antweb_url", "=", "f'http://antweb.org/description.do?genus={genus}&species={species}&subspecies={subspecies}&rank=subspecies'", "async", "with", "ctx", ".", "channel", ".", "typing", "(", ")", ":", "async", "with", "self", ".", "bot", ".", "session", ".", "get", "(", "antwiki_url", ")", "as", "resp", ":", "text", "=", "await", "resp", ".", "read", "(", ")", "antwiki_soup", "=", "BeautifulSoup", "(", "text", ",", "'html.parser'", ")", "error", "=", "antwiki_soup", ".", "find", "(", "'table'", ",", "attrs", "=", "{", "'class'", ":", "'infobox biota'", "}", ")", "if", "error", "is", "not", "None", ":", "name", "=", "antwiki_soup", ".", "find", "(", "'h1'", ",", "id", "=", "'firstHeading'", ")", ".", "span", ".", "i", ".", "text", "image", "=", "antwiki_soup", ".", "find", "(", "'table'", ",", "attrs", "=", "{", "'class'", ":", "'infobox biota'", "}", ")", ".", "find", "(", "'img'", ")", "image", "=", "image", "[", "'src'", "]", "if", "image", "is", "not", "None", "else", "None", "image", "=", "f'http://antwiki.org{image}'", "if", "image", "is", "not", 
"None", "else", "''", "subfamily", "=", "antwiki_soup", ".", "find", "(", "'span'", ",", "attrs", "=", "{", "'class'", ":", "'subfamily'", "}", ")", "subfamily", "=", "subfamily", ".", "a", ".", "text", "if", "subfamily", "is", "not", "None", "else", "''", "tribe", "=", "antwiki_soup", ".", "find", "(", "'span'", ",", "attrs", "=", "{", "'class'", ":", "'tribe'", "}", ")", "tribe", "=", "tribe", ".", "a", ".", "text", "if", "tribe", "is", "not", "None", "else", "''", "embed", "=", "discord", ".", "Embed", "(", "colour", "=", "discord", ".", "Colour", ".", "green", "(", ")", ")", "embed", ".", "title", "=", "name", "embed", ".", "set_thumbnail", "(", "url", "=", "image", ")", "embed", ".", "description", "=", "f'**Subfamily:** {subfamily}\\n**Tribe:** {tribe}'", "embed", ".", "add_field", "(", "name", "=", "'AntWiki'", ",", "value", "=", "antwiki_url", ")", "embed", ".", "add_field", "(", "name", "=", "'AntWeb'", ",", "value", "=", "antweb_url", ")", "await", "ctx", ".", "send", "(", "embed", "=", "embed", ")", "else", ":", "await", "ctx", ".", "send", "(", "'Not found.'", ")" ]
[ 18, 4 ]
[ 68, 44 ]
python
en
['en', 'en', 'en']
True
Font.color
(self)
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above
def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"]
[ "def", "color", "(", "self", ")", ":", "return", "self", "[", "\"color\"", "]" ]
[ 15, 4 ]
[ 64, 28 ]
python
en
['en', 'error', 'th']
False
Font.colorsrc
(self)
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object
def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"]
[ "def", "colorsrc", "(", "self", ")", ":", "return", "self", "[", "\"colorsrc\"", "]" ]
[ 73, 4 ]
[ 84, 31 ]
python
en
['en', 'error', 'th']
False
Font.family
(self)
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above
def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"]
[ "def", "family", "(", "self", ")", ":", "return", "self", "[", "\"family\"", "]" ]
[ 93, 4 ]
[ 116, 29 ]
python
en
['en', 'error', 'th']
False
Font.familysrc
(self)
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object
def familysrc(self): """ Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"]
[ "def", "familysrc", "(", "self", ")", ":", "return", "self", "[", "\"familysrc\"", "]" ]
[ 125, 4 ]
[ 136, 32 ]
python
en
['en', 'error', 'th']
False
Font.size
(self)
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above
def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"]
[ "def", "size", "(", "self", ")", ":", "return", "self", "[", "\"size\"", "]" ]
[ 145, 4 ]
[ 155, 27 ]
python
en
['en', 'error', 'th']
False
Font.sizesrc
(self)
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object
def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"]
[ "def", "sizesrc", "(", "self", ")", ":", "return", "self", "[", "\"sizesrc\"", "]" ]
[ 164, 4 ]
[ 175, 30 ]
python
en
['en', 'error', 'th']
False
Font.__init__
( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs )
Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . Returns ------- Font
Construct a new Font object Sets the font used in hover labels.
def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs ): """ Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . 
Returns ------- Font """ super(Font, self).__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.sankey.hoverlabel.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("colorsrc", None) _v = colorsrc if colorsrc is not None else _v if _v is not None: self["colorsrc"] = _v _v = arg.pop("family", None) _v = family if family is not None else _v if _v is not None: self["family"] = _v _v = arg.pop("familysrc", None) _v = familysrc if familysrc is not None else _v if _v is not None: self["familysrc"] = _v _v = arg.pop("size", None) _v = size if size is not None else _v if _v is not None: self["size"] = _v _v = arg.pop("sizesrc", None) _v = sizesrc if sizesrc is not None else _v if _v is not None: self["sizesrc"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "color", "=", "None", ",", "colorsrc", "=", "None", ",", "family", "=", "None", ",", "familysrc", "=", "None", ",", "size", "=", "None", ",", "sizesrc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Font", ",", "self", ")", ".", "__init__", "(", "\"font\"", ")", "if", "\"_parent\"", "in", "kwargs", ":", "self", ".", "_parent", "=", "kwargs", "[", "\"_parent\"", "]", "return", "# Validate arg", "# ------------", "if", "arg", "is", "None", ":", "arg", "=", "{", "}", "elif", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "arg", "=", "arg", ".", "to_plotly_json", "(", ")", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "arg", "=", "_copy", ".", "copy", "(", "arg", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"\\\nThe first argument to the plotly.graph_objs.sankey.hoverlabel.Font \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`\"\"\"", ")", "# Handle skip_invalid", "# -------------------", "self", ".", "_skip_invalid", "=", "kwargs", ".", "pop", "(", "\"skip_invalid\"", ",", "False", ")", "self", ".", "_validate", "=", "kwargs", ".", "pop", "(", "\"_validate\"", ",", "True", ")", "# Populate data dict with properties", "# ----------------------------------", "_v", "=", "arg", ".", "pop", "(", "\"color\"", ",", "None", ")", "_v", "=", "color", "if", "color", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"color\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"colorsrc\"", ",", "None", ")", "_v", "=", "colorsrc", "if", "colorsrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"colorsrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"family\"", ",", "None", ")", "_v", "=", "family", "if", "family", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"family\"", "]", "=", 
"_v", "_v", "=", "arg", ".", "pop", "(", "\"familysrc\"", ",", "None", ")", "_v", "=", "familysrc", "if", "familysrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"familysrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"size\"", ",", "None", ")", "_v", "=", "size", "if", "size", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"size\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"sizesrc\"", ",", "None", ")", "_v", "=", "sizesrc", "if", "sizesrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"sizesrc\"", "]", "=", "_v", "# Process unknown kwargs", "# ----------------------", "self", ".", "_process_kwargs", "(", "*", "*", "dict", "(", "arg", ",", "*", "*", "kwargs", ")", ")", "# Reset skip_invalid", "# ------------------", "self", ".", "_skip_invalid", "=", "False" ]
[ 215, 4 ]
[ 329, 34 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_bst_single_task
(self)
Test model trained single-task on BlendedSkillTalk.
Test model trained single-task on BlendedSkillTalk.
def test_bst_single_task(self): """ Test model trained single-task on BlendedSkillTalk. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/bst_single_task/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.8906, delta=0.005)
[ "def", "test_bst_single_task", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/bst_single_task/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.8906", ",", "delta", "=", "0.005", ")" ]
[ 19, 4 ]
[ 30, 70 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_convai2_single_task
(self)
Test model trained single-task on ConvAI2.
Test model trained single-task on ConvAI2.
def test_convai2_single_task(self): """ Test model trained single-task on ConvAI2. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/convai2_single_task/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.8438, delta=0.005)
[ "def", "test_convai2_single_task", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/convai2_single_task/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.8438", ",", "delta", "=", "0.005", ")" ]
[ 32, 4 ]
[ 43, 70 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_ed_single_task
(self)
Test model trained single-task on EmpatheticDialogues.
Test model trained single-task on EmpatheticDialogues.
def test_ed_single_task(self): """ Test model trained single-task on EmpatheticDialogues. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/ed_single_task/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.7656, delta=0.005)
[ "def", "test_ed_single_task", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/ed_single_task/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.7656", ",", "delta", "=", "0.005", ")" ]
[ 45, 4 ]
[ 56, 70 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_wizard_single_task
(self)
Test model trained single-task on Wizard of Wikipedia.
Test model trained single-task on Wizard of Wikipedia.
def test_wizard_single_task(self): """ Test model trained single-task on Wizard of Wikipedia. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/wizard_single_task/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.7500, delta=0.005)
[ "def", "test_wizard_single_task", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/wizard_single_task/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.7500", ",", "delta", "=", "0.005", ")" ]
[ 58, 4 ]
[ 69, 70 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_multi_task
(self)
Test model trained multi-task on dialogue datasets.
Test model trained multi-task on dialogue datasets.
def test_multi_task(self): """ Test model trained multi-task on dialogue datasets. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/multi_task/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.9062, delta=0.005)
[ "def", "test_multi_task", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/multi_task/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.9062", ",", "delta", "=", "0.005", ")" ]
[ 71, 4 ]
[ 82, 70 ]
python
en
['en', 'error', 'th']
False
TestBlendedSkillTalkModels.test_multi_task_bst_tuned
(self)
Test model trained multi-task and then tuned on BlendedSkillTalk.
Test model trained multi-task and then tuned on BlendedSkillTalk.
def test_multi_task_bst_tuned(self): """ Test model trained multi-task and then tuned on BlendedSkillTalk. """ valid, _ = testing_utils.eval_model( opt={ **SHARED_OPTS, 'model_file': f'zoo:blended_skill_talk/multi_task_bst_tuned/model', }, skip_test=True, ) self.assertAlmostEqual(valid['accuracy'], 0.9219, delta=0.005)
[ "def", "test_multi_task_bst_tuned", "(", "self", ")", ":", "valid", ",", "_", "=", "testing_utils", ".", "eval_model", "(", "opt", "=", "{", "*", "*", "SHARED_OPTS", ",", "'model_file'", ":", "f'zoo:blended_skill_talk/multi_task_bst_tuned/model'", ",", "}", ",", "skip_test", "=", "True", ",", ")", "self", ".", "assertAlmostEqual", "(", "valid", "[", "'accuracy'", "]", ",", "0.9219", ",", "delta", "=", "0.005", ")" ]
[ 84, 4 ]
[ 95, 70 ]
python
en
['en', 'error', 'th']
False
Stream.maxpoints
(self)
Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. The 'maxpoints' property is a number and may be specified as: - An int or float in the interval [0, 10000] Returns ------- int|float
Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. The 'maxpoints' property is a number and may be specified as: - An int or float in the interval [0, 10000]
def maxpoints(self): """ Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. The 'maxpoints' property is a number and may be specified as: - An int or float in the interval [0, 10000] Returns ------- int|float """ return self["maxpoints"]
[ "def", "maxpoints", "(", "self", ")", ":", "return", "self", "[", "\"maxpoints\"", "]" ]
[ 15, 4 ]
[ 28, 32 ]
python
en
['en', 'error', 'th']
False
Stream.token
(self)
The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. The 'token' property is a string and must be specified as: - A non-empty string Returns ------- str
The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. The 'token' property is a string and must be specified as: - A non-empty string
def token(self): """ The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. The 'token' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["token"]
[ "def", "token", "(", "self", ")", ":", "return", "self", "[", "\"token\"", "]" ]
[ 37, 4 ]
[ 50, 28 ]
python
en
['en', 'error', 'th']
False
Stream.__init__
(self, arg=None, maxpoints=None, token=None, **kwargs)
Construct a new Stream object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.mesh3d.Stream` maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. Returns ------- Stream
Construct a new Stream object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.mesh3d.Stream` maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details.
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs): """ Construct a new Stream object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.mesh3d.Stream` maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. Returns ------- Stream """ super(Stream, self).__init__("stream") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.mesh3d.Stream constructor must be a dict or an instance of :class:`plotly.graph_objs.mesh3d.Stream`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("maxpoints", None) _v = maxpoints if maxpoints is not None else _v if _v is not None: self["maxpoints"] = _v _v = arg.pop("token", None) _v = token if token is not None else _v if _v is not None: self["token"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "maxpoints", "=", "None", ",", "token", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Stream", ",", "self", ")", ".", "__init__", "(", "\"stream\"", ")", "if", "\"_parent\"", "in", "kwargs", ":", "self", ".", "_parent", "=", "kwargs", "[", "\"_parent\"", "]", "return", "# Validate arg", "# ------------", "if", "arg", "is", "None", ":", "arg", "=", "{", "}", "elif", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "arg", "=", "arg", ".", "to_plotly_json", "(", ")", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "arg", "=", "_copy", ".", "copy", "(", "arg", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"\\\nThe first argument to the plotly.graph_objs.mesh3d.Stream \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.mesh3d.Stream`\"\"\"", ")", "# Handle skip_invalid", "# -------------------", "self", ".", "_skip_invalid", "=", "kwargs", ".", "pop", "(", "\"skip_invalid\"", ",", "False", ")", "self", ".", "_validate", "=", "kwargs", ".", "pop", "(", "\"_validate\"", ",", "True", ")", "# Populate data dict with properties", "# ----------------------------------", "_v", "=", "arg", ".", "pop", "(", "\"maxpoints\"", ",", "None", ")", "_v", "=", "maxpoints", "if", "maxpoints", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"maxpoints\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"token\"", ",", "None", ")", "_v", "=", "token", "if", "token", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"token\"", "]", "=", "_v", "# Process unknown kwargs", "# ----------------------", "self", ".", "_process_kwargs", "(", "*", "*", "dict", "(", "arg", ",", "*", "*", "kwargs", ")", ")", "# Reset skip_invalid", "# ------------------", "self", ".", "_skip_invalid", "=", "False" ]
[ 72, 4 ]
[ 139, 34 ]
python
en
['en', 'error', 'th']
False
Textfont.color
(self)
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above
def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"]
[ "def", "color", "(", "self", ")", ":", "return", "self", "[", "\"color\"", "]" ]
[ 15, 4 ]
[ 64, 28 ]
python
en
['en', 'error', 'th']
False
Textfont.colorsrc
(self)
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object
def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"]
[ "def", "colorsrc", "(", "self", ")", ":", "return", "self", "[", "\"colorsrc\"", "]" ]
[ 73, 4 ]
[ 84, 31 ]
python
en
['en', 'error', 'th']
False
Textfont.family
(self)
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above
def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"]
[ "def", "family", "(", "self", ")", ":", "return", "self", "[", "\"family\"", "]" ]
[ 93, 4 ]
[ 116, 29 ]
python
en
['en', 'error', 'th']
False
Textfont.familysrc
(self)
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object
def familysrc(self): """ Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"]
[ "def", "familysrc", "(", "self", ")", ":", "return", "self", "[", "\"familysrc\"", "]" ]
[ 125, 4 ]
[ 136, 32 ]
python
en
['en', 'error', 'th']
False
Textfont.size
(self)
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above
def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"]
[ "def", "size", "(", "self", ")", ":", "return", "self", "[", "\"size\"", "]" ]
[ 145, 4 ]
[ 155, 27 ]
python
en
['en', 'error', 'th']
False
Textfont.sizesrc
(self)
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object
def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"]
[ "def", "sizesrc", "(", "self", ")", ":", "return", "self", "[", "\"sizesrc\"", "]" ]
[ 164, 4 ]
[ 175, 30 ]
python
en
['en', 'error', 'th']
False
Textfont.__init__
( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs )
Construct a new Textfont object Sets the font used for `textinfo`. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.pie.Textfont` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . Returns ------- Textfont
Construct a new Textfont object Sets the font used for `textinfo`.
def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs ): """ Construct a new Textfont object Sets the font used for `textinfo`. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.pie.Textfont` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . 
Returns ------- Textfont """ super(Textfont, self).__init__("textfont") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.pie.Textfont constructor must be a dict or an instance of :class:`plotly.graph_objs.pie.Textfont`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("colorsrc", None) _v = colorsrc if colorsrc is not None else _v if _v is not None: self["colorsrc"] = _v _v = arg.pop("family", None) _v = family if family is not None else _v if _v is not None: self["family"] = _v _v = arg.pop("familysrc", None) _v = familysrc if familysrc is not None else _v if _v is not None: self["familysrc"] = _v _v = arg.pop("size", None) _v = size if size is not None else _v if _v is not None: self["size"] = _v _v = arg.pop("sizesrc", None) _v = sizesrc if sizesrc is not None else _v if _v is not None: self["sizesrc"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "color", "=", "None", ",", "colorsrc", "=", "None", ",", "family", "=", "None", ",", "familysrc", "=", "None", ",", "size", "=", "None", ",", "sizesrc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Textfont", ",", "self", ")", ".", "__init__", "(", "\"textfont\"", ")", "if", "\"_parent\"", "in", "kwargs", ":", "self", ".", "_parent", "=", "kwargs", "[", "\"_parent\"", "]", "return", "# Validate arg", "# ------------", "if", "arg", "is", "None", ":", "arg", "=", "{", "}", "elif", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "arg", "=", "arg", ".", "to_plotly_json", "(", ")", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "arg", "=", "_copy", ".", "copy", "(", "arg", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"\\\nThe first argument to the plotly.graph_objs.pie.Textfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.pie.Textfont`\"\"\"", ")", "# Handle skip_invalid", "# -------------------", "self", ".", "_skip_invalid", "=", "kwargs", ".", "pop", "(", "\"skip_invalid\"", ",", "False", ")", "self", ".", "_validate", "=", "kwargs", ".", "pop", "(", "\"_validate\"", ",", "True", ")", "# Populate data dict with properties", "# ----------------------------------", "_v", "=", "arg", ".", "pop", "(", "\"color\"", ",", "None", ")", "_v", "=", "color", "if", "color", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"color\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"colorsrc\"", ",", "None", ")", "_v", "=", "colorsrc", "if", "colorsrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"colorsrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"family\"", ",", "None", ")", "_v", "=", "family", "if", "family", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"family\"", "]", "=", "_v", "_v", 
"=", "arg", ".", "pop", "(", "\"familysrc\"", ",", "None", ")", "_v", "=", "familysrc", "if", "familysrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"familysrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"size\"", ",", "None", ")", "_v", "=", "size", "if", "size", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"size\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"sizesrc\"", ",", "None", ")", "_v", "=", "sizesrc", "if", "sizesrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"sizesrc\"", "]", "=", "_v", "# Process unknown kwargs", "# ----------------------", "self", ".", "_process_kwargs", "(", "*", "*", "dict", "(", "arg", ",", "*", "*", "kwargs", ")", ")", "# Reset skip_invalid", "# ------------------", "self", ".", "_skip_invalid", "=", "False" ]
[ 215, 4 ]
[ 328, 34 ]
python
en
['en', 'error', 'th']
False
ColorBar.bgcolor
(self)
Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str
Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen
def bgcolor(self): """ Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["bgcolor"]
[ "def", "bgcolor", "(", "self", ")", ":", "return", "self", "[", "\"bgcolor\"", "]" ]
[ 59, 4 ]
[ 109, 30 ]
python
en
['en', 'error', 'th']
False
ColorBar.bordercolor
(self)
Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str
Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen
def bordercolor(self): """ Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["bordercolor"]
[ "def", "bordercolor", "(", "self", ")", ":", "return", "self", "[", "\"bordercolor\"", "]" ]
[ 118, 4 ]
[ 168, 34 ]
python
en
['en', 'error', 'th']
False
ColorBar.borderwidth
(self)
Sets the width (in px) or the border enclosing this color bar. The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float
Sets the width (in px) or the border enclosing this color bar. The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf]
def borderwidth(self): """ Sets the width (in px) or the border enclosing this color bar. The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["borderwidth"]
[ "def", "borderwidth", "(", "self", ")", ":", "return", "self", "[", "\"borderwidth\"", "]" ]
[ 177, 4 ]
[ 188, 34 ]
python
en
['en', 'error', 'th']
False
ColorBar.dtick
(self)
Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type Returns ------- Any
Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type
def dtick(self): """ Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type Returns ------- Any """ return self["dtick"]
[ "def", "dtick", "(", "self", ")", ":", "return", "self", "[", "\"dtick\"", "]" ]
[ 197, 4 ]
[ 226, 28 ]
python
en
['en', 'error', 'th']
False
ColorBar.exponentformat
(self)
Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B'] Returns ------- Any
Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B']
def exponentformat(self): """ Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B'] Returns ------- Any """ return self["exponentformat"]
[ "def", "exponentformat", "(", "self", ")", ":", "return", "self", "[", "\"exponentformat\"", "]" ]
[ 235, 4 ]
[ 251, 37 ]
python
en
['en', 'error', 'th']
False
ColorBar.len
(self)
Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float
Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf]
def len(self): """ Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["len"]
[ "def", "len", "(", "self", ")", ":", "return", "self", "[", "\"len\"", "]" ]
[ 260, 4 ]
[ 273, 26 ]
python
en
['en', 'error', 'th']
False
ColorBar.lenmode
(self)
Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any
Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels']
def lenmode(self): """ Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["lenmode"]
[ "def", "lenmode", "(", "self", ")", ":", "return", "self", "[", "\"lenmode\"", "]" ]
[ 282, 4 ]
[ 296, 30 ]
python
en
['en', 'error', 'th']
False