Dataset schema: code (string, 26 – 870k chars) · docstring (string, 1 – 65.6k chars) · func_name (string, 1 – 194 chars) · language (string, 1 class) · repo (string, 8 – 68 chars) · path (string, 5 – 194 chars) · url (string, 46 – 254 chars) · license (string, 4 classes)
async def _send_sack(self):
    """
    Build and send a selective acknowledgement (SACK) chunk.
    """
    gaps = []
    gap_next = None
    for tsn in sorted(self._sack_misordered):
        pos = (tsn - self._last_received_tsn) % SCTP_TSN_MODULO
        if tsn == gap_next:
            gaps[-1][1] = pos
        else:
            gaps.append([pos, pos])
        gap_next = tsn_plus_one(tsn)

    sack = SackChunk()
    sack.cumulative_tsn = self._last_received_tsn
    sack.advertised_rwnd = max(0, self._advertised_rwnd)
    sack.duplicates = self._sack_duplicates[:]
    sack.gaps = [tuple(x) for x in gaps]
    await self._send_chunk(sack)

    self._sack_duplicates.clear()
    self._sack_needed = False
Build and send a selective acknowledgement (SACK) chunk.
_send_sack
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
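For context, the gap blocks built above are offsets from the cumulative TSN, computed modulo the 32-bit TSN space. A minimal standalone sketch of the same computation (the SCTP_TSN_MODULO and tsn_plus_one names mirror aiortc's, but this version is an illustration, not the library code):

import typing

SCTP_TSN_MODULO = 2 ** 32

def tsn_plus_one(tsn: int) -> int:
    return (tsn + 1) % SCTP_TSN_MODULO

def compute_gaps(last_received_tsn: int, misordered: typing.Set[int]) -> list:
    # Coalesce consecutive out-of-order TSNs into (start, end) gap blocks,
    # expressed as offsets from the cumulative TSN.
    gaps, gap_next = [], None
    for tsn in sorted(misordered):
        pos = (tsn - last_received_tsn) % SCTP_TSN_MODULO
        if tsn == gap_next:
            gaps[-1][1] = pos
        else:
            gaps.append([pos, pos])
        gap_next = tsn_plus_one(tsn)
    return [tuple(g) for g in gaps]

# With cumulative TSN 100 and TSNs 102, 103 and 105 received out of order,
# the SACK advertises gap blocks (2, 3) and (5, 5).
assert compute_gaps(100, {102, 103, 105}) == [(2, 3), (5, 5)]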
def _set_state(self, state) -> None:
    """
    Transition the SCTP association to a new state.
    """
    if state != self._association_state:
        self.__log_debug("- %s -> %s", self._association_state, state)
        self._association_state = state

    if state == self.State.ESTABLISHED:
        self.__state = "connected"
        for channel in list(self._data_channels.values()):
            if channel.negotiated and channel.readyState != "open":
                channel._setReadyState("open")
        asyncio.ensure_future(self._data_channel_flush())
    elif state == self.State.CLOSED:
        self._t1_cancel()
        self._t2_cancel()
        self._t3_cancel()
        self.__state = "closed"

        # close data channels
        for stream_id in list(self._data_channels.keys()):
            self._data_channel_closed(stream_id)

        # no more events will be emitted, so remove all event listeners
        # to facilitate garbage collection.
        self.remove_all_listeners()
Transition the SCTP association to a new state.
_set_state
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
async def _transmit(self) -> None:
    """
    Transmit outbound data.
    """
    # send FORWARD TSN
    if self._forward_tsn_chunk is not None:
        await self._send_chunk(self._forward_tsn_chunk)
        self._forward_tsn_chunk = None

        # ensure T3 is running
        if not self._t3_handle:
            self._t3_start()

    # limit burst size
    if self._fast_recovery_exit is not None:
        burst_size = 2 * USERDATA_MAX_LENGTH
    else:
        burst_size = 4 * USERDATA_MAX_LENGTH
    cwnd = min(self._flight_size + burst_size, self._cwnd)

    # retransmit
    retransmit_earliest = True
    for chunk in self._sent_queue:
        if chunk._retransmit:
            if self._fast_recovery_transmit:
                self._fast_recovery_transmit = False
            elif self._flight_size >= cwnd:
                return
            self._flight_size_increase(chunk)

            chunk._misses = 0
            chunk._retransmit = False
            chunk._sent_count += 1
            await self._send_chunk(chunk)
            if retransmit_earliest:
                # restart the T3 timer as the earliest outstanding TSN
                # is being retransmitted
                self._t3_restart()

        retransmit_earliest = False

    while self._outbound_queue and self._flight_size < cwnd:
        chunk = self._outbound_queue.popleft()
        self._sent_queue.append(chunk)
        self._flight_size_increase(chunk)

        # update counters
        chunk._sent_count += 1
        chunk._sent_time = time.time()

        await self._send_chunk(chunk)
        if not self._t3_handle:
            self._t3_start()
Transmit outbound data.
_transmit
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
def _update_advanced_peer_ack_point(self) -> None:
    """
    Try to advance "Advanced.Peer.Ack.Point" according to RFC 3758.
    """
    if uint32_gt(self._last_sacked_tsn, self._advanced_peer_ack_tsn):
        self._advanced_peer_ack_tsn = self._last_sacked_tsn

    done = 0
    streams = {}
    while self._sent_queue and self._sent_queue[0]._abandoned:
        chunk = self._sent_queue.popleft()
        self._advanced_peer_ack_tsn = chunk.tsn
        done += 1
        if not (chunk.flags & SCTP_DATA_UNORDERED):
            streams[chunk.stream_id] = chunk.stream_seq

    if done:
        # build FORWARD TSN
        self._forward_tsn_chunk = ForwardTsnChunk()
        self._forward_tsn_chunk.cumulative_tsn = self._advanced_peer_ack_tsn
        self._forward_tsn_chunk.streams = list(streams.items())
Try to advance "Advanced.Peer.Ack.Point" according to RFC 3758.
_update_advanced_peer_ack_point
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
def _update_rto(self, R: float) -> None:
    """
    Update RTO given a new roundtrip measurement R.
    """
    if self._srtt is None:
        self._rttvar = R / 2
        self._srtt = R
    else:
        self._rttvar = (1 - SCTP_RTO_BETA) * self._rttvar + SCTP_RTO_BETA * abs(
            self._srtt - R
        )
        self._srtt = (1 - SCTP_RTO_ALPHA) * self._srtt + SCTP_RTO_ALPHA * R
    self._rto = max(SCTP_RTO_MIN, min(self._srtt + 4 * self._rttvar, SCTP_RTO_MAX))
Update RTO given a new roundtrip measurement R.
_update_rto
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
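This is the RFC 4960 smoothed-RTT update. A standalone sketch of the same smoothing, assuming the RFC's default constants (ALPHA = 1/8, BETA = 1/4, RTO.Min = 1 s, RTO.Max = 60 s; aiortc's actual constant values are not shown in this record, so these are assumptions):

# Standalone sketch of RFC 4960 RTO smoothing; constant values are the
# RFC defaults, assumed here for illustration.
SCTP_RTO_ALPHA = 1 / 8
SCTP_RTO_BETA = 1 / 4
SCTP_RTO_MIN = 1.0   # seconds
SCTP_RTO_MAX = 60.0  # seconds

srtt, rttvar = None, None
for R in [0.200, 0.250, 0.180]:  # successive roundtrip measurements
    if srtt is None:
        rttvar = R / 2
        srtt = R
    else:
        rttvar = (1 - SCTP_RTO_BETA) * rttvar + SCTP_RTO_BETA * abs(srtt - R)
        srtt = (1 - SCTP_RTO_ALPHA) * srtt + SCTP_RTO_ALPHA * R
    # For sub-second RTTs the RTO.Min clamp dominates, yielding 1.0 s here.
    rto = max(SCTP_RTO_MIN, min(srtt + 4 * rttvar, SCTP_RTO_MAX))
    print(f"R={R:.3f} srtt={srtt:.4f} rttvar={rttvar:.4f} rto={rto:.2f}")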
def _data_channel_close(self, channel, transmit=True):
    """
    Request closing the datachannel by sending an Outgoing Stream Reset Request.
    """
    if channel.readyState not in ["closing", "closed"]:
        channel._setReadyState("closing")

        if self._association_state == self.State.ESTABLISHED:
            # queue a stream reset
            self._reconfig_queue.append(channel.id)
            if len(self._reconfig_queue) == 1:
                asyncio.ensure_future(self._transmit_reconfig())
        else:
            # remove any queued messages for the datachannel
            new_queue = deque()
            for queue_item in self._data_channel_queue:
                if queue_item[0] != channel:
                    new_queue.append(queue_item)
            self._data_channel_queue = new_queue

            # mark the datachannel as closed
            if channel.id is not None:
                self._data_channels.pop(channel.id)
            channel._setReadyState("closed")
Request closing the datachannel by sending an Outgoing Stream Reset Request.
_data_channel_close
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
async def _data_channel_flush(self) -> None:
    """
    Try to flush buffered data to the SCTP layer.

    We wait until the association is established, as we need to know
    whether we are a client or a server to correctly assign an odd/even
    ID to the data channels.
    """
    if self._association_state != self.State.ESTABLISHED:
        return

    while self._data_channel_queue and not self._outbound_queue:
        channel, protocol, user_data = self._data_channel_queue.popleft()

        # register channel if necessary
        stream_id = channel.id
        if stream_id is None:
            stream_id = self._data_channel_id
            while stream_id in self._data_channels:
                stream_id += 2
            self._data_channels[stream_id] = channel
            channel._setId(stream_id)

        # send data
        if protocol == WEBRTC_DCEP:
            await self._send(stream_id, protocol, user_data)
        else:
            if channel.maxPacketLifeTime:
                expiry = time.time() + (channel.maxPacketLifeTime / 1000)
            else:
                expiry = None
            await self._send(
                stream_id,
                protocol,
                user_data,
                expiry=expiry,
                max_retransmits=channel.maxRetransmits,
                ordered=channel.ordered,
            )
            channel._addBufferedAmount(-len(user_data))
Try to flush buffered data to the SCTP layer. We wait until the association is established, as we need to know whether we are a client or a server to correctly assign an odd/even ID to the data channels.
_data_channel_flush
python
aiortc/aiortc
src/aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcsctptransport.py
BSD-3-Clause
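The odd/even rule referenced in the docstring comes from RFC 8832: the side acting as DTLS client allocates even stream identifiers, the DTLS server odd ones, which is why the loop above steps by 2. A minimal sketch of that allocation rule (function names here are illustrative, not aiortc's):

import typing

def first_stream_id(is_dtls_client: bool) -> int:
    # RFC 8832: DTLS client uses even stream ids, DTLS server odd ones.
    return 0 if is_dtls_client else 1

def allocate_stream_id(is_dtls_client: bool, in_use: typing.Set[int]) -> int:
    stream_id = first_stream_id(is_dtls_client)
    while stream_id in in_use:
        stream_id += 2  # stay on our parity, as in _data_channel_flush
    return stream_id

assert allocate_stream_id(True, {0, 2}) == 4
assert allocate_stream_id(False, {1}) == 3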
def expires(self) -> datetime.datetime:
    """
    The date and time after which the certificate will be considered invalid.
    """
    return self._cert.to_cryptography().not_valid_after_utc
The date and time after which the certificate will be considered invalid.
expires
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
def getFingerprints(self) -> List[RTCDtlsFingerprint]:
    """
    Returns the list of certificate fingerprints, one of which is computed
    with the digest algorithm used in the certificate signature.
    """
    return [
        RTCDtlsFingerprint(
            algorithm=algorithm,
            value=certificate_digest(self._cert, algorithm),
        )
        for algorithm in X509_DIGEST_ALGORITHMS.keys()
    ]
Returns the list of certificate fingerprints, one of which is computed with the digest algorithm used in the certificate signature.
getFingerprints
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
def generateCertificate(cls: Type[CERTIFICATE_T]) -> CERTIFICATE_T:
    """
    Create and return an X.509 certificate and corresponding private key.

    :rtype: RTCCertificate
    """
    key = ec.generate_private_key(ec.SECP256R1(), default_backend())
    cert = generate_certificate(key)
    return cls(
        key=crypto.PKey.from_cryptography_key(key),
        cert=crypto.X509.from_cryptography(cert),
    )
Create and return an X.509 certificate and corresponding private key. :rtype: RTCCertificate
generateCertificate
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
def state(self) -> str:
    """
    The current state of the DTLS transport.

    One of `'new'`, `'connecting'`, `'connected'`, `'closed'` or `'failed'`.
    """
    return str(self._state)[6:].lower()
The current state of the DTLS transport. One of `'new'`, `'connecting'`, `'connected'`, `'closed'` or `'failed'`.
state
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
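The `[6:]` slice works because `str()` of a member of an enum class named `State` is `"State.<NAME>"`, so dropping the first six characters ("State.") leaves just the member name. A self-contained illustration:

import enum

class State(enum.Enum):
    NEW = 0
    CONNECTING = 1

# str(State.NEW) is "State.NEW"; slicing off "State." and lower-casing
# yields the W3C-style state string.
assert str(State.NEW)[6:].lower() == "new"
assert str(State.CONNECTING)[6:].lower() == "connecting"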
def transport(self):
    """
    The associated :class:`RTCIceTransport` instance.
    """
    return self._transport
The associated :class:`RTCIceTransport` instance.
transport
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
def getLocalParameters(self) -> RTCDtlsParameters:
    """
    Get the local parameters of the DTLS transport.

    :rtype: :class:`RTCDtlsParameters`
    """
    return RTCDtlsParameters(
        fingerprints=self.__local_certificate.getFingerprints()
    )
Get the local parameters of the DTLS transport. :rtype: :class:`RTCDtlsParameters`
getLocalParameters
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
async def start(self, remoteParameters: RTCDtlsParameters) -> None:
    """
    Start DTLS transport negotiation with the parameters of the remote
    DTLS transport.

    :param remoteParameters: An :class:`RTCDtlsParameters`.
    """
    assert self._state == State.NEW
    assert len(remoteParameters.fingerprints)

    # For WebRTC, the DTLS role is explicitly determined as part of the
    # offer / answer exchange.
    #
    # For ORTC however, we determine the DTLS role based on the ICE role.
    if self._role == "auto":
        if self.transport.role == "controlling":
            self._set_role("server")
        else:
            self._set_role("client")

    # Initialise SSL.
    self._ssl = SSL.Connection(
        self.__local_certificate._create_ssl_context(
            srtp_profiles=self._srtp_profiles
        )
    )
    if self._role == "server":
        self._ssl.set_accept_state()
    else:
        self._ssl.set_connect_state()

    self._set_state(State.CONNECTING)
    try:
        while not self.encrypted:
            try:
                self._ssl.do_handshake()
            except SSL.WantReadError:
                await self._write_ssl()
                await self._recv_next()
            except SSL.Error as exc:
                self.__log_debug("x DTLS handshake failed (error %s)", exc)
                self._set_state(State.FAILED)
                return
            else:
                self.encrypted = True
    except ConnectionError:
        self.__log_debug("x DTLS handshake failed (connection error)")
        self._set_state(State.FAILED)
        return

    # Check remote fingerprints. There must be at least one fingerprint
    # with a supported algorithm, and all supported fingerprints must
    # match.
    x509 = self._ssl.get_peer_certificate()
    fingerprint_supported = 0
    fingerprint_valid = 0
    for f in remoteParameters.fingerprints:
        algorithm = f.algorithm.lower()
        if algorithm in X509_DIGEST_ALGORITHMS:
            fingerprint_supported += 1
            if f.value.upper() == certificate_digest(x509, algorithm):
                fingerprint_valid += 1
    if not fingerprint_supported or fingerprint_valid != fingerprint_supported:
        self.__log_debug("x DTLS handshake failed (fingerprint mismatch)")
        self._set_state(State.FAILED)
        return

    # generate keying material
    openssl_profile = self._ssl.get_selected_srtp_profile()
    for srtp_profile in self._srtp_profiles:
        if srtp_profile.openssl_profile == openssl_profile:
            self.__log_debug(
                "x DTLS handshake negotiated %s",
                srtp_profile.openssl_profile.decode(),
            )
            break
    else:
        self.__log_debug("x DTLS handshake failed (no SRTP profile negotiated)")
        self._set_state(State.FAILED)
        return

    view = self._ssl.export_keying_material(
        b"EXTRACTOR-dtls_srtp",
        2 * (srtp_profile.key_length + srtp_profile.salt_length),
    )
    if self._role == "server":
        srtp_tx_key = srtp_profile.get_key_and_salt(view, 1)
        srtp_rx_key = srtp_profile.get_key_and_salt(view, 0)
    else:
        srtp_tx_key = srtp_profile.get_key_and_salt(view, 0)
        srtp_rx_key = srtp_profile.get_key_and_salt(view, 1)

    rx_policy = Policy(
        key=srtp_rx_key,
        ssrc_type=Policy.SSRC_ANY_INBOUND,
        srtp_profile=srtp_profile.libsrtp_profile,
    )
    rx_policy.allow_repeat_tx = True
    rx_policy.window_size = 1024
    self._rx_srtp = Session(rx_policy)

    tx_policy = Policy(
        key=srtp_tx_key,
        ssrc_type=Policy.SSRC_ANY_OUTBOUND,
        srtp_profile=srtp_profile.libsrtp_profile,
    )
    tx_policy.allow_repeat_tx = True
    tx_policy.window_size = 1024
    self._tx_srtp = Session(tx_policy)

    # start data pump
    self.__log_debug("- DTLS handshake complete")
    self._set_state(State.CONNECTED)
    self._task = asyncio.ensure_future(self.__run())
Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param remoteParameters: An :class:`RTCDtlsParameters`.
start
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
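The index argument passed to get_key_and_salt above follows the RFC 5764 keying-material layout: client key, server key, client salt, server salt, concatenated in that order, which is why the server transmits with index 1 and receives with index 0. A sketch of a helper with that layout (it mirrors the aiortc helper's role, but this version is an illustration with explicit length parameters):

def get_key_and_salt(view: bytes, idx: int, key_length: int, salt_length: int) -> bytes:
    # RFC 5764 exported material: key[0] | key[1] | salt[0] | salt[1],
    # where index 0 is the DTLS client and index 1 the DTLS server.
    key_start = key_length * idx
    salt_start = 2 * key_length + salt_length * idx
    return (view[key_start:key_start + key_length]
            + view[salt_start:salt_start + salt_length])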
async def stop(self) -> None:
    """
    Stop and close the DTLS transport.
    """
    if self._task is not None:
        self._task.cancel()
        self._task = None

    if self._ssl and self._state in [State.CONNECTING, State.CONNECTED]:
        try:
            self._ssl.shutdown()
        except SSL.Error:
            pass
        try:
            await self._write_ssl()
        except ConnectionError:
            pass
        self.__log_debug("- DTLS shutdown complete")
Stop and close the DTLS transport.
stop
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
async def _write_ssl(self) -> None:
    """
    Flush outgoing data which OpenSSL put in our BIO to the transport.
    """
    try:
        data = self._ssl.bio_read(1500)
    except SSL.Error:
        data = b""
    if data:
        await self.transport._send(data)
        self.__tx_bytes += len(data)
        self.__tx_packets += 1
Flush outgoing data which OpenSSL put in our BIO to the transport.
_write_ssl
python
aiortc/aiortc
src/aiortc/rtcdtlstransport.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/rtcdtlstransport.py
BSD-3-Clause
def addTrack(self, track):
    """
    Add a track whose media should be discarded.

    :param track: A :class:`aiortc.MediaStreamTrack`.
    """
    if track not in self.__tracks:
        self.__tracks[track] = None
Add a track whose media should be discarded. :param track: A :class:`aiortc.MediaStreamTrack`.
addTrack
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
async def start(self) -> None:
    """
    Start discarding media.
    """
    for track, task in self.__tracks.items():
        if task is None:
            self.__tracks[track] = asyncio.ensure_future(blackhole_consume(track))
Start discarding media.
start
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
async def stop(self) -> None:
    """
    Stop discarding media.
    """
    for task in self.__tracks.values():
        if task is not None:
            task.cancel()
    self.__tracks = {}
Stop discarding media.
stop
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
def audio(self) -> MediaStreamTrack:
    """
    A :class:`aiortc.MediaStreamTrack` instance if the file contains audio.
    """
    return self.__audio
A :class:`aiortc.MediaStreamTrack` instance if the file contains audio.
audio
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
def video(self) -> MediaStreamTrack:
    """
    A :class:`aiortc.MediaStreamTrack` instance if the file contains video.
    """
    return self.__video
A :class:`aiortc.MediaStreamTrack` instance if the file contains video.
video
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
def addTrack(self, track: MediaStreamTrack) -> None:
    """
    Add a track to be recorded.

    :param track: A :class:`aiortc.MediaStreamTrack`.
    """
    if track.kind == "audio":
        if self.__container.format.name in ("wav", "alsa", "pulse"):
            codec_name = "pcm_s16le"
        elif self.__container.format.name == "mp3":
            codec_name = "mp3"
        elif self.__container.format.name == "ogg":
            codec_name = "libopus"
        else:
            codec_name = "aac"
        stream = self.__container.add_stream(codec_name)
    else:
        if self.__container.format.name == "image2":
            stream = self.__container.add_stream("png", rate=30)
            stream.pix_fmt = "rgb24"
        else:
            stream = self.__container.add_stream("libx264", rate=30)
            stream.pix_fmt = "yuv420p"
    self.__tracks[track] = MediaRecorderContext(stream)
Add a track to be recorded. :param track: A :class:`aiortc.MediaStreamTrack`.
addTrack
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
async def start(self) -> None:
    """
    Start recording.
    """
    for track, context in self.__tracks.items():
        if context.task is None:
            context.task = asyncio.ensure_future(self.__run_track(track, context))
Start recording.
start
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
async def stop(self) -> None:
    """
    Stop recording.
    """
    if self.__container:
        for track, context in self.__tracks.items():
            if context.task is not None:
                context.task.cancel()
                context.task = None
                for packet in context.stream.encode(None):
                    self.__container.mux(packet)
        self.__tracks = {}

        if self.__container:
            self.__container.close()
            self.__container = None
Stop recording.
stop
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
def subscribe(
    self, track: MediaStreamTrack, buffered: bool = True
) -> MediaStreamTrack:
    """
    Create a proxy around the given `track` for a new consumer.

    :param track: Source :class:`MediaStreamTrack` which is relayed.
    :param buffered: Whether a buffer is needed between the source track
        and the relayed track.
    :rtype: :class:`MediaStreamTrack`
    """
    proxy = RelayStreamTrack(self, track, buffered)
    self.__log_debug("Create proxy %s for source %s", id(proxy), id(track))
    if track not in self.__proxies:
        self.__proxies[track] = set()
    return proxy
Create a proxy around the given `track` for a new consumer. :param track: Source :class:`MediaStreamTrack` which is relayed. :param buffered: Whether a buffer is needed between the source track and the relayed track. :rtype: :class:`MediaStreamTrack`
subscribe
python
aiortc/aiortc
src/aiortc/contrib/media.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/media.py
BSD-3-Clause
def add_signaling_arguments(parser):
    """
    Add signaling method arguments to an argparse.ArgumentParser.
    """
    parser.add_argument(
        "--signaling",
        "-s",
        choices=["copy-and-paste", "tcp-socket", "unix-socket"],
    )
    parser.add_argument(
        "--signaling-host", default="127.0.0.1", help="Signaling host (tcp-socket only)"
    )
    parser.add_argument(
        "--signaling-port", default=1234, help="Signaling port (tcp-socket only)"
    )
    parser.add_argument(
        "--signaling-path",
        default="aiortc.socket",
        help="Signaling socket path (unix-socket only)",
    )
Add signaling method arguments to an argparse.ArgumentParser.
add_signaling_arguments
python
aiortc/aiortc
src/aiortc/contrib/signaling.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/signaling.py
BSD-3-Clause
def create_signaling(args):
    """
    Create a signaling method based on command-line arguments.
    """
    if args.signaling == "tcp-socket":
        return TcpSocketSignaling(args.signaling_host, args.signaling_port)
    elif args.signaling == "unix-socket":
        return UnixSocketSignaling(args.signaling_path)
    else:
        return CopyAndPasteSignaling()
Create a signaling method based on command-line arguments.
create_signaling
python
aiortc/aiortc
src/aiortc/contrib/signaling.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/contrib/signaling.py
BSD-3-Clause
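The two helpers above are designed to be used together in example scripts. A hypothetical wiring (assuming they are imported from aiortc.contrib.signaling, where these records say they live):

import argparse
from aiortc.contrib.signaling import add_signaling_arguments, create_signaling

parser = argparse.ArgumentParser(description="demo")
add_signaling_arguments(parser)
# Parsing an explicit argv list here just for illustration:
args = parser.parse_args(["--signaling", "tcp-socket", "--signaling-port", "5678"])
signaling = create_signaling(args)  # a TcpSocketSignaling instance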
def target_bitrate(self) -> int:
    """
    Target bitrate in bits per second.
    """
    return self.__target_bitrate
Target bitrate in bits per second.
target_bitrate
python
aiortc/aiortc
src/aiortc/codecs/vpx.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/codecs/vpx.py
BSD-3-Clause
def target_bitrate(self) -> int:
    """
    Target bitrate in bits per second.
    """
    return self.__target_bitrate
Target bitrate in bits per second.
target_bitrate
python
aiortc/aiortc
src/aiortc/codecs/h264.py
https://github.com/aiortc/aiortc/blob/master/src/aiortc/codecs/h264.py
BSD-3-Clause
def up(self):
    """Bring up interface. Equivalent to ifconfig [iface] up."""
    # Set new flags
    flags = self.ifflags | IFF_UP
    self.ifflags = flags
    self.get_mtu()
Bring up interface. Equivalent to ifconfig [iface] up.
up
python
aiortc/aiortc
examples/datachannel-vpn/tuntap.py
https://github.com/aiortc/aiortc/blob/master/examples/datachannel-vpn/tuntap.py
BSD-3-Clause
def down(self):
    """Bring down interface. Equivalent to ifconfig [iface] down."""
    # Set new flags
    flags = self.ifflags & ~IFF_UP
    self.ifflags = flags
Bring down interface. Equivalent to ifconfig [iface] down.
down
python
aiortc/aiortc
examples/datachannel-vpn/tuntap.py
https://github.com/aiortc/aiortc/blob/master/examples/datachannel-vpn/tuntap.py
BSD-3-Clause
def is_up(self):
    """Return True if the interface is up, False otherwise."""
    if self.ifflags & IFF_UP:
        return True
    else:
        return False
Return True if the interface is up, False otherwise.
is_up
python
aiortc/aiortc
examples/datachannel-vpn/tuntap.py
https://github.com/aiortc/aiortc/blob/master/examples/datachannel-vpn/tuntap.py
BSD-3-Clause
def open(self):
    """Open file corresponding to the TUN device."""
    self.fd = open("/dev/net/tun", "rb+", buffering=0)
    tun_flags = IFF_TAP | IFF_NO_PI | IFF_PERSIST
    ifr = struct.pack("16sH", self.name, tun_flags)
    fcntl.ioctl(self.fd, TUNSETIFF, ifr)
    fcntl.ioctl(self.fd, TUNSETOWNER, os.getuid())
    self.ifflags = self.ifflags | IFF_RUNNING
Open file corresponding to the TUN device.
open
python
aiortc/aiortc
examples/datachannel-vpn/tuntap.py
https://github.com/aiortc/aiortc/blob/master/examples/datachannel-vpn/tuntap.py
BSD-3-Clause
async def publish(plugin, player):
    """
    Send video to the room.
    """
    pc = RTCPeerConnection()
    pcs.add(pc)

    # configure media
    media = {"audio": False, "video": True}
    if player and player.audio:
        pc.addTrack(player.audio)
        media["audio"] = True

    if player and player.video:
        pc.addTrack(player.video)
    else:
        pc.addTrack(VideoStreamTrack())

    # send offer
    await pc.setLocalDescription(await pc.createOffer())
    request = {"request": "configure"}
    request.update(media)
    response = await plugin.send(
        {
            "body": request,
            "jsep": {
                "sdp": pc.localDescription.sdp,
                "trickle": False,
                "type": pc.localDescription.type,
            },
        }
    )

    # apply answer
    await pc.setRemoteDescription(
        RTCSessionDescription(
            sdp=response["jsep"]["sdp"], type=response["jsep"]["type"]
        )
    )
Send video to the room.
publish
python
aiortc/aiortc
examples/janus/janus.py
https://github.com/aiortc/aiortc/blob/master/examples/janus/janus.py
BSD-3-Clause
def parse_line(line):
    """Parse information from a line in a requirements text file."""
    if line.startswith('-r '):
        # Allow specifying requirements in other files
        target = line.split(' ')[1]
        for info in parse_require_file(target):
            yield info
    else:
        info = {'line': line}
        if line.startswith('-e '):
            info['package'] = line.split('#egg=')[1]
        elif '@git+' in line:
            info['package'] = line
        else:
            # Remove versioning from the package
            pat = '(' + '|'.join(['>=', '==', '>']) + ')'
            parts = re.split(pat, line, maxsplit=1)
            parts = [p.strip() for p in parts]

            info['package'] = parts[0]
            if len(parts) > 1:
                op, rest = parts[1:]
                if ';' in rest:
                    # Handle platform specific dependencies
                    # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                    version, platform_deps = map(str.strip, rest.split(';'))
                    info['platform_deps'] = platform_deps
                else:
                    version = rest  # NOQA
                info['version'] = (op, version)
        yield info
Parse information from a line in a requirements text file.
parse_requirements.parse_line
python
open-mmlab/mmaction2
setup.py
https://github.com/open-mmlab/mmaction2/blob/master/setup.py
Apache-2.0
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]

                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip, rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
parse_requirements
python
open-mmlab/mmaction2
setup.py
https://github.com/open-mmlab/mmaction2/blob/master/setup.py
Apache-2.0
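The core of parse_line is the version-splitting regex: splitting on the first `>=`, `==` or `>` with a capturing group keeps the operator in the result. A self-contained illustration of that step, including a platform-specific dependency that the `';'` branch later splits off:

import re

# Standalone illustration of the version-splitting step in parse_line.
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
line = 'decord>=0.4.1;platform_system=="Linux"'
parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
assert parts == ['decord', '>=', '0.4.1;platform_system=="Linux"']
# parse_line would then record package='decord', version=('>=', '0.4.1')
# and platform_deps='platform_system=="Linux"'.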
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.

    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installation mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        return

    filenames = ['tools', 'configs', 'model-index.yml', 'dataset-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmaction', '.mim')
    os.makedirs(mim_path, exist_ok=True)

    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)

            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)

            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Creating a symbolic link on windows may raise an
                    # `OSError: [WinError 1314]` due to privilege. If
                    # the error happens, the src file will be copied
                    mode = 'copy'
                    warnings.warn(
                        f'Failed to create a symbolic link for {src_relpath}, '
                        f'and it will be copied to {tar_path}')
                else:
                    continue

            if mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            elif mode != 'symlink':
                raise ValueError(f'Invalid mode {mode}')
Add extra files that are required to support MIM into the package. These files will be added by creating a symlink to the originals if the package is installed in `editable` mode (e.g. pip install -e .), or by copying from the originals otherwise.
add_mim_extension
python
open-mmlab/mmaction2
setup.py
https://github.com/open-mmlab/mmaction2/blob/master/setup.py
Apache-2.0
def available_models():
    """Returns the names of available ActionCLIP models."""
    return list(_MODELS.keys())
Returns the names of available ActionCLIP models.
available_models
python
open-mmlab/mmaction2
projects/actionclip/models/load.py
https://github.com/open-mmlab/mmaction2/blob/master/projects/actionclip/models/load.py
Apache-2.0
def get_bbox(keypoints):
    """Get bbox from keypoints."""
    if len(keypoints) == 0:
        return [0, 0, 0, 0]
    x1, y1, _ = np.amin(keypoints, axis=0)
    x2, y2, _ = np.amax(keypoints, axis=0)
    w, h = x2 - x1, y2 - y1
    return [x1, y1, w, h]
Get bbox from keypoints.
parse_halpe.get_bbox
python
open-mmlab/mmaction2
projects/gesture_recognition/parse_pose.py
https://github.com/open-mmlab/mmaction2/blob/master/projects/gesture_recognition/parse_pose.py
Apache-2.0
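A quick usage sketch, assuming get_bbox from the record above is in scope: the input rows are (x, y, score) keypoints and the output is an [x1, y1, w, h] box spanning them.

import numpy as np

# Three illustrative keypoints in (x, y, score) form.
kpts = np.array([[10., 20., 1.], [30., 25., 1.], [15., 40., 1.]])
# Min corner is (10, 20), max corner is (30, 40), so w = h = 20.
assert get_bbox(kpts) == [10., 20., 20., 20.]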
def parse_halpe(file_path, anno_idx):

    def get_bbox(keypoints):
        """Get bbox from keypoints."""
        if len(keypoints) == 0:
            return [0, 0, 0, 0]
        x1, y1, _ = np.amin(keypoints, axis=0)
        x2, y2, _ = np.amax(keypoints, axis=0)
        w, h = x2 - x1, y2 - y1
        return [x1, y1, w, h]

    with open(file_path) as f:
        contents = json.load(f)
    data_root = get_data_root(file_path) + '/'

    images = contents['images']
    annos = contents['annotations']
    images_out, annos_out = [], []
    for img, anno in zip(images, annos):
        assert img['id'] == anno['image_id']
        keypoints = np.array(anno['keypoints']).reshape(-1, 3)
        lefthand_kpts = keypoints[-42:-21, :]
        righthand_kpts = keypoints[-21:, :]

        left_mask = lefthand_kpts[:, 2] > 0
        right_mask = righthand_kpts[:, 2] > 0

        lefthand_box = get_bbox(lefthand_kpts[left_mask])
        righthand_box = get_bbox(righthand_kpts[right_mask])

        if max(lefthand_box) > 0:
            img_out = dict(
                file_name=data_root + img['file_name'],
                height=img['height'],
                width=img['width'],
                id=anno_idx)
            anno_out = dict(
                area=lefthand_box[2] * lefthand_box[3],
                iscrowd=anno['iscrowd'],
                image_id=anno_idx,
                bbox=lefthand_box,
                category_id=0,
                id=anno_idx)
            anno_idx += 1

            images_out.append(img_out)
            annos_out.append(anno_out)

        if max(righthand_box) > 0:
            img_out = dict(
                file_name=data_root + img['file_name'],
                height=img['height'],
                width=img['width'],
                id=anno_idx)
            anno_out = dict(
                area=righthand_box[2] * righthand_box[3],
                iscrowd=anno['iscrowd'],
                image_id=anno_idx,
                bbox=righthand_box,
                category_id=0,
                id=anno_idx)
            anno_idx += 1

            images_out.append(img_out)
            annos_out.append(anno_out)

    return images_out, annos_out, anno_idx
Get bbox from keypoints.
parse_halpe
python
open-mmlab/mmaction2
projects/gesture_recognition/parse_pose.py
https://github.com/open-mmlab/mmaction2/blob/master/projects/gesture_recognition/parse_pose.py
Apache-2.0
def get_sinusoid_encoding_table(n_position, d_hid, cur_frame=-1, pre_n_position=1568):
    """Sinusoid position encoding table."""

    def get_position_angle_vec(position):
        return [
            position / np.power(10000, 2 * (hid_j // 2) / d_hid)
            for hid_j in range(d_hid)
        ]

    sinusoid_table = np.array(
        [get_position_angle_vec(pos_i) for pos_i in range(pre_n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    sinusoid_table = torch.tensor(
        sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)

    print(f'n_position: {n_position}')
    print(f'pre_n_position: {pre_n_position}')

    if n_position // cur_frame * 8 != pre_n_position and cur_frame != -1:
        T = 8  # checkpoint frame
        P = 14  # checkpoint size
        C = d_hid
        new_P = int((n_position // cur_frame)**0.5)  # testing size
        print(
            f'Pretraining uses 14x14, but current version is {new_P}x{new_P}')
        print('Interpolate the position embedding')
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.reshape(-1, P, P, C).permute(0, 3, 1, 2)
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table,
            size=(new_P, new_P),
            mode='bicubic',
            align_corners=False)
        # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 1).reshape(
            -1, T, new_P, new_P, C)
        sinusoid_table = sinusoid_table.flatten(1, 3)

    if cur_frame != -1 and cur_frame != 8:
        print(f'Pretraining uses 8 frames, but current frame is {cur_frame}')
        print('Interpolate the position embedding')
        T = 8  # checkpoint frame
        new_T = cur_frame  # testing frame
        # interpolate
        P = int((n_position // cur_frame)**0.5)  # testing size
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, 1).reshape(
            -1, C, T)  # BHW, C, T
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(
            0, 4, 1, 2, 3)  # B, T, H, W, C
        sinusoid_table = sinusoid_table.flatten(1, 3)

    if n_position == pre_n_position:
        return sinusoid_table
    else:
        print('Use learnable position embedding')
        return nn.Parameter(sinusoid_table, requires_grad=True)
Sinusoid position encoding table.
get_sinusoid_encoding_table
python
open-mmlab/mmaction2
projects/umt/models/vit.py
https://github.com/open-mmlab/mmaction2/blob/master/projects/umt/models/vit.py
Apache-2.0
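The table built above is the standard Transformer sinusoid position encoding. In LaTeX, with position $p$ and hidden size $d_{hid}$:

PE_{(p,\,2i)} = \sin\!\left(\frac{p}{10000^{2i/d_{hid}}}\right), \qquad
PE_{(p,\,2i+1)} = \cos\!\left(\frac{p}{10000^{2i/d_{hid}}}\right)

Even dimensions get the sine, odd dimensions the cosine of the same angle, which is exactly what the two strided slice assignments (`0::2` and `1::2`) implement.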
def forward(self, x):
    """Defines the computation performed at every call."""
    return super().forward(x)
Defines the computation performed at every call.
forward
python
open-mmlab/mmaction2
projects/example_project/models/example_net.py
https://github.com/open-mmlab/mmaction2/blob/master/projects/example_project/models/example_net.py
Apache-2.0
def test_transformer_adapter():
    """Test transformer adapter."""
    with pytest.raises(RuntimeError):
        num_segs_model = 8
        num_segs_features = 9
        adapter = TransformerAdapter(
            num_segs=num_segs_model,
            transformer_width=64,
            transformer_heads=8,
            transformer_layers=2)
        features = torch.randn(2, num_segs_features, 64)
        adapter(features)

    num_segs = 8
    adapter = TransformerAdapter(
        num_segs=num_segs,
        transformer_width=64,
        transformer_heads=8,
        transformer_layers=2)
    adapter.init_weights()
    features = torch.randn(2, num_segs, 64)
    adapted_features = adapter(features)
    assert adapted_features.shape == torch.Size([2, 64])
Test transformer adapter.
test_transformer_adapter
python
open-mmlab/mmaction2
tests/models/similarity/test_adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/similarity/test_adapters.py
Apache-2.0
def test_simple_mean_adapter():
    """Test simple mean adapter."""
    adapter = SimpleMeanAdapter(dim=1)
    features = torch.randn(2, 8, 64)
    adapted_features = adapter(features)
    assert adapted_features.shape == torch.Size([2, 64])

    adapter = SimpleMeanAdapter(dim=(1, 2))
    features = torch.randn(2, 8, 2, 64)
    adapted_features = adapter(features)
    assert adapted_features.shape == torch.Size([2, 64])
Test simple mean adapter.
test_simple_mean_adapter
python
open-mmlab/mmaction2
tests/models/similarity/test_adapters.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/similarity/test_adapters.py
Apache-2.0
def test_TAM():
    """Test TAM."""
    with pytest.raises(AssertionError):
        # alpha must be a positive integer
        TAM(16, 8, alpha=0, beta=4)

    with pytest.raises(AssertionError):
        # beta must be a positive integer
        TAM(16, 8, alpha=2, beta=0)

    with pytest.raises(AssertionError):
        # the channels number of x should be equal to self.in_channels of TAM
        tam = TAM(16, 8)
        x = torch.rand(64, 8, 112, 112)
        tam(x)

    tam = TAM(16, 8)
    x = torch.rand(32, 16, 112, 112)
    output = tam(x)
    assert output.shape == torch.Size([32, 16, 112, 112])
test TAM.
test_TAM
python
open-mmlab/mmaction2
tests/models/common/test_tam.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/common/test_tam.py
Apache-2.0
def test_gcn_head():
    """Test GCNHead."""
    with pytest.raises(AssertionError):
        GCNHead(4, 5)(torch.rand((1, 2, 6, 75, 17)))

    gcn_head = GCNHead(num_classes=60, in_channels=256)
    gcn_head.init_weights()
    feat = torch.rand(1, 2, 256, 75, 25)
    cls_scores = gcn_head(feat)
    assert gcn_head.num_classes == 60
    assert gcn_head.in_channels == 256
    assert cls_scores.shape == torch.Size([1, 60])

    gcn_head = GCNHead(num_classes=60, in_channels=256, dropout=0.1)
    gcn_head.init_weights()
    feat = torch.rand(1, 2, 256, 75, 25)
    cls_scores = gcn_head(feat)
    assert gcn_head.num_classes == 60
    assert gcn_head.in_channels == 256
    assert cls_scores.shape == torch.Size([1, 60])
Test GCNHead.
test_gcn_head
python
open-mmlab/mmaction2
tests/models/heads/test_gcn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_gcn_head.py
Apache-2.0
def test_tsn_head():
    """Test loss method, layer construction, attributes and forward function
    in tsn head."""
    tsn_head = TSNHead(num_classes=4, in_channels=2048)
    tsn_head.init_weights()

    assert tsn_head.num_classes == 4
    assert tsn_head.dropout_ratio == 0.4
    assert tsn_head.in_channels == 2048
    assert tsn_head.init_std == 0.01
    assert tsn_head.consensus.dim == 1
    assert tsn_head.spatial_type == 'avg'

    assert isinstance(tsn_head.dropout, nn.Dropout)
    assert tsn_head.dropout.p == tsn_head.dropout_ratio

    assert isinstance(tsn_head.fc_cls, nn.Linear)
    assert tsn_head.fc_cls.in_features == tsn_head.in_channels
    assert tsn_head.fc_cls.out_features == tsn_head.num_classes

    assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert tsn_head.avg_pool.output_size == (1, 1)

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # tsn head inference
    num_segs = input_shape[0]
    cls_scores = tsn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    # Test multi-class recognition
    multi_tsn_head = TSNHead(
        num_classes=4,
        in_channels=2048,
        loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
        multi_class=True,
        label_smooth_eps=0.01)
    multi_tsn_head.init_weights()
    assert multi_tsn_head.num_classes == 4
    assert multi_tsn_head.dropout_ratio == 0.4
    assert multi_tsn_head.in_channels == 2048
    assert multi_tsn_head.init_std == 0.01
    assert multi_tsn_head.consensus.dim == 1

    assert isinstance(multi_tsn_head.dropout, nn.Dropout)
    assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio

    assert isinstance(multi_tsn_head.fc_cls, nn.Linear)
    assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels
    assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes

    assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert multi_tsn_head.avg_pool.output_size == (1, 1)

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # multi-class tsn head inference (the original called tsn_head here,
    # which looks like a copy-paste slip)
    num_segs = input_shape[0]
    cls_scores = multi_tsn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])
Test loss method, layer construction, attributes and forward function in tsn head.
test_tsn_head
python
open-mmlab/mmaction2
tests/models/heads/test_tsn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tsn_head.py
Apache-2.0
def test_tpn_head():
    """Test loss method, layer construction, attributes and forward function
    in tpn head."""
    tpn_head = TPNHead(num_classes=4, in_channels=2048)
    tpn_head.init_weights()

    assert hasattr(tpn_head, 'avg_pool2d')
    assert hasattr(tpn_head, 'avg_pool3d')
    assert isinstance(tpn_head.avg_pool3d, nn.AdaptiveAvgPool3d)
    assert tpn_head.avg_pool3d.output_size == (1, 1, 1)
    assert tpn_head.avg_pool2d is None

    input_shape = (4, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # tpn head inference with num_segs
    num_segs = 2
    cls_scores = tpn_head(feat, num_segs)
    assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
    assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
    assert cls_scores.shape == torch.Size([2, 4])

    # tpn head inference with no num_segs
    input_shape = (2, 2048, 3, 7, 7)
    feat = torch.rand(input_shape)
    cls_scores = tpn_head(feat)
    assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
    assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
    assert cls_scores.shape == torch.Size([2, 4])
Test loss method, layer construction, attributes and forward function in tpn head.
test_tpn_head
python
open-mmlab/mmaction2
tests/models/heads/test_tpn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tpn_head.py
Apache-2.0
def test_tsm_head():
    """Test loss method, layer construction, attributes and forward function
    in tsm head."""
    tsm_head = TSMHead(num_classes=4, in_channels=2048)
    tsm_head.init_weights()

    assert tsm_head.num_classes == 4
    assert tsm_head.dropout_ratio == 0.8
    assert tsm_head.in_channels == 2048
    assert tsm_head.init_std == 0.001
    assert tsm_head.consensus.dim == 1
    assert tsm_head.spatial_type == 'avg'

    assert isinstance(tsm_head.dropout, nn.Dropout)
    assert tsm_head.dropout.p == tsm_head.dropout_ratio

    assert isinstance(tsm_head.fc_cls, nn.Linear)
    assert tsm_head.fc_cls.in_features == tsm_head.in_channels
    assert tsm_head.fc_cls.out_features == tsm_head.num_classes

    assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert tsm_head.avg_pool.output_size == 1

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # tsm head inference with no init
    num_segs = input_shape[0]
    cls_scores = tsm_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    # tsm head inference with init
    tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)
    tsm_head.init_weights()
    cls_scores = tsm_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([2, 4])
Test loss method, layer construction, attributes and forward function in tsm head.
test_tsm_head
python
open-mmlab/mmaction2
tests/models/heads/test_tsm_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tsm_head.py
Apache-2.0
def test_i3d_head():
    """Test loss method, layer construction, attributes and forward function
    in i3d head."""
    i3d_head = I3DHead(num_classes=4, in_channels=2048)
    i3d_head.init_weights()

    assert i3d_head.num_classes == 4
    assert i3d_head.dropout_ratio == 0.5
    assert i3d_head.in_channels == 2048
    assert i3d_head.init_std == 0.01

    assert isinstance(i3d_head.dropout, nn.Dropout)
    assert i3d_head.dropout.p == i3d_head.dropout_ratio

    assert isinstance(i3d_head.fc_cls, nn.Linear)
    assert i3d_head.fc_cls.in_features == i3d_head.in_channels
    assert i3d_head.fc_cls.out_features == i3d_head.num_classes

    assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d)
    assert i3d_head.avg_pool.output_size == (1, 1, 1)

    input_shape = (3, 2048, 4, 7, 7)
    feat = torch.rand(input_shape)

    # i3d head inference
    cls_scores = i3d_head(feat)
    assert cls_scores.shape == torch.Size([3, 4])
Test loss method, layer construction, attributes and forward function in i3d head.
test_i3d_head
python
open-mmlab/mmaction2
tests/models/heads/test_i3d_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_i3d_head.py
Apache-2.0
def test_trn_head():
    """Test loss method, layer construction, attributes and forward function
    in trn head."""
    from mmaction.models.heads.trn_head import (RelationModule,
                                                RelationModuleMultiScale)
    trn_head = TRNHead(num_classes=4, in_channels=2048, relation_type='TRN')
    trn_head.init_weights()

    assert trn_head.num_classes == 4
    assert trn_head.dropout_ratio == 0.8
    assert trn_head.in_channels == 2048
    assert trn_head.init_std == 0.001
    assert trn_head.spatial_type == 'avg'

    relation_module = trn_head.consensus
    assert isinstance(relation_module, RelationModule)
    assert relation_module.hidden_dim == 256
    assert isinstance(relation_module.classifier[3], nn.Linear)
    assert relation_module.classifier[3].out_features == trn_head.num_classes

    assert trn_head.dropout.p == trn_head.dropout_ratio
    assert isinstance(trn_head.dropout, nn.Dropout)

    assert isinstance(trn_head.fc_cls, nn.Linear)
    assert trn_head.fc_cls.in_features == trn_head.in_channels
    assert trn_head.fc_cls.out_features == trn_head.hidden_dim

    assert isinstance(trn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert trn_head.avg_pool.output_size == 1

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # trn head inference with no init
    num_segs = input_shape[0]
    cls_scores = trn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    # trn head inference with init
    trn_head = TRNHead(
        num_classes=4,
        in_channels=2048,
        num_segments=8,
        relation_type='TRNMultiScale')
    trn_head.init_weights()
    assert isinstance(trn_head.consensus, RelationModuleMultiScale)
    assert trn_head.consensus.scales == range(8, 1, -1)
    cls_scores = trn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    with pytest.raises(ValueError):
        trn_head = TRNHead(
            num_classes=4,
            in_channels=2048,
            num_segments=8,
            relation_type='RelationModlue')
Test loss method, layer construction, attributes and forward function in trn head.
test_trn_head
python
open-mmlab/mmaction2
tests/models/heads/test_trn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_trn_head.py
Apache-2.0
def test_timesformer_head():
    """Test loss method, layer construction, attributes and forward function
    in timesformer head."""
    timesformer_head = TimeSformerHead(num_classes=4, in_channels=64)
    timesformer_head.init_weights()

    assert timesformer_head.num_classes == 4
    assert timesformer_head.in_channels == 64
    assert timesformer_head.init_std == 0.02

    input_shape = (2, 64)
    feat = torch.rand(input_shape)

    cls_scores = timesformer_head(feat)
    assert cls_scores.shape == torch.Size([2, 4])
Test loss method, layer construction, attributes and forward function in timesformer head.
test_timesformer_head
python
open-mmlab/mmaction2
tests/models/heads/test_timesformer_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_timesformer_head.py
Apache-2.0
def test_rgbpose_head():
    """Test RGBPoseHead."""
    rgbpose_head = RGBPoseHead(
        num_classes=4,
        in_channels=[2048, 512],
        dropout=dict(rgb=0.51, pose=0.49))
    rgbpose_head.init_weights()

    assert rgbpose_head.num_classes == 4
    assert rgbpose_head.dropout == dict(rgb=0.51, pose=0.49)
    assert rgbpose_head.in_channels == [2048, 512]
    assert rgbpose_head.init_std == 0.01

    assert isinstance(rgbpose_head.dropout_rgb, nn.Dropout)
    assert isinstance(rgbpose_head.dropout_pose, nn.Dropout)
    assert rgbpose_head.dropout_rgb.p == rgbpose_head.dropout['rgb']
    assert rgbpose_head.dropout_pose.p == rgbpose_head.dropout['pose']

    assert isinstance(rgbpose_head.fc_rgb, nn.Linear)
    assert isinstance(rgbpose_head.fc_pose, nn.Linear)
    assert rgbpose_head.fc_rgb.in_features == rgbpose_head.in_channels[0]
    assert rgbpose_head.fc_rgb.out_features == rgbpose_head.num_classes
    assert rgbpose_head.fc_pose.in_features == rgbpose_head.in_channels[1]
    assert rgbpose_head.fc_pose.out_features == rgbpose_head.num_classes

    assert isinstance(rgbpose_head.avg_pool, nn.AdaptiveAvgPool3d)
    assert rgbpose_head.avg_pool.output_size == (1, 1, 1)

    feat_rgb = torch.rand((2, 2048, 8, 7, 7))
    feat_pose = torch.rand((2, 512, 32, 7, 7))

    cls_scores = rgbpose_head((feat_rgb, feat_pose))
    assert cls_scores['rgb'].shape == torch.Size([2, 4])
    assert cls_scores['pose'].shape == torch.Size([2, 4])
Test RGBPoseHead.
test_rgbpose_head
python
open-mmlab/mmaction2
tests/models/heads/test_rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_rgbpose_head.py
Apache-2.0
def test_x3d_head():
    """Test loss method, layer construction, attributes and forward function
    in x3d head."""
    x3d_head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False)
    x3d_head.init_weights()

    assert x3d_head.num_classes == 4
    assert x3d_head.dropout_ratio == 0.5
    assert x3d_head.in_channels == 432
    assert x3d_head.init_std == 0.01

    assert isinstance(x3d_head.dropout, nn.Dropout)
    assert x3d_head.dropout.p == x3d_head.dropout_ratio

    assert isinstance(x3d_head.fc1, nn.Linear)
    assert x3d_head.fc1.in_features == x3d_head.in_channels
    assert x3d_head.fc1.out_features == x3d_head.mid_channels
    assert x3d_head.fc1.bias is None

    assert isinstance(x3d_head.fc2, nn.Linear)
    assert x3d_head.fc2.in_features == x3d_head.mid_channels
    assert x3d_head.fc2.out_features == x3d_head.num_classes

    assert isinstance(x3d_head.pool, nn.AdaptiveAvgPool3d)
    assert x3d_head.pool.output_size == (1, 1, 1)

    input_shape = (3, 432, 4, 7, 7)
    feat = torch.rand(input_shape)

    # x3d head inference
    cls_scores = x3d_head(feat)
    assert cls_scores.shape == torch.Size([3, 4])
Test loss method, layer construction, attributes and forward function in x3d head.
test_x3d_head
python
open-mmlab/mmaction2
tests/models/heads/test_x3d_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_x3d_head.py
Apache-2.0
def test_slowfast_head():
    """Test loss method, layer construction, attributes and forward function
    in slowfast head."""
    sf_head = SlowFastHead(num_classes=4, in_channels=2304)
    sf_head.init_weights()

    assert sf_head.num_classes == 4
    assert sf_head.dropout_ratio == 0.8
    assert sf_head.in_channels == 2304
    assert sf_head.init_std == 0.01

    assert isinstance(sf_head.dropout, nn.Dropout)
    assert sf_head.dropout.p == sf_head.dropout_ratio

    assert isinstance(sf_head.fc_cls, nn.Linear)
    assert sf_head.fc_cls.in_features == sf_head.in_channels
    assert sf_head.fc_cls.out_features == sf_head.num_classes

    assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d)
    assert sf_head.avg_pool.output_size == (1, 1, 1)

    input_shape = (3, 2048, 32, 7, 7)
    feat_slow = torch.rand(input_shape)

    input_shape = (3, 256, 4, 7, 7)
    feat_fast = torch.rand(input_shape)

    sf_head = SlowFastHead(num_classes=4, in_channels=2304)
    cls_scores = sf_head((feat_slow, feat_fast))
    assert cls_scores.shape == torch.Size([3, 4])
Test loss method, layer construction, attributes and forward function in slowfast head.
test_slowfast_head
python
open-mmlab/mmaction2
tests/models/heads/test_slowfast_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_slowfast_head.py
Apache-2.0
def test_swin_backbone():
    """Test swin backbone."""
    with pytest.raises(AssertionError):
        SwinTransformer3D(arch='-t')

    with pytest.raises(AssertionError):
        SwinTransformer3D(arch={'embed_dims': 96})

    with pytest.raises(AssertionError):
        SwinTransformer3D(arch={
            'embed_dims': 96,
            'depths': [2, 2, 6],
            'num_heads': [3, 6, 12, 24]
        })

    with pytest.raises(AssertionError):
        SwinTransformer3D(
            arch={
                'embed_dims': 96,
                'depths': [2, 2, 6, 2, 2],
                'num_heads': [3, 6, 12, 24, 48]
            })

    with pytest.raises(AssertionError):
        SwinTransformer3D(arch='t', out_indices=(4, ))

    with pytest.raises(TypeError):
        swin_t = SwinTransformer3D(arch='t', pretrained=[0, 1, 1])
        swin_t.init_weights()

    with pytest.raises(TypeError):
        swin_t = SwinTransformer3D(arch='t')
        swin_t.init_weights(pretrained=[0, 1, 1])

    swin_b = SwinTransformer3D(arch='b', pretrained=None, pretrained2d=False)
    swin_b.init_weights()
    swin_b.train()

    pretrained_url = 'https://download.openmmlab.com/mmaction/v1.0/' \
                     'recognition/swin/swin_tiny_patch4_window7_224.pth'
    swin_t_pre = SwinTransformer3D(
        arch='t', pretrained=pretrained_url, pretrained2d=True)
    swin_t_pre.init_weights()
    swin_t_pre.train()

    from mmengine.runner.checkpoint import _load_checkpoint
    ckpt_2d = _load_checkpoint(pretrained_url, map_location='cpu')
    state_dict = ckpt_2d['model']

    patch_embed_weight2d = state_dict['patch_embed.proj.weight'].data
    patch_embed_weight3d = swin_t_pre.patch_embed.proj.weight.data
    assert torch.equal(
        patch_embed_weight3d,
        patch_embed_weight2d.unsqueeze(2).expand_as(patch_embed_weight3d) /
        patch_embed_weight3d.shape[2])

    norm = swin_t_pre.norm3
    assert torch.equal(norm.weight.data, state_dict['norm.weight'])
    assert torch.equal(norm.bias.data, state_dict['norm.bias'])

    for name, param in swin_t_pre.named_parameters():
        if 'relative_position_bias_table' in name:
            bias2d = state_dict[name]
            assert torch.equal(
                param.data,
                bias2d.repeat(2 * swin_t_pre.window_size[0] - 1, 1))

    frozen_stages = 1
    swin_t_frozen = SwinTransformer3D(
        arch='t',
        pretrained=None,
        pretrained2d=False,
        frozen_stages=frozen_stages)
    swin_t_frozen.init_weights()
    swin_t_frozen.train()
    for param in swin_t_frozen.patch_embed.parameters():
        assert param.requires_grad is False
    for i in range(frozen_stages):
        layer = swin_t_frozen.layers[i]
        for param in layer.parameters():
            assert param.requires_grad is False

    input_shape = (1, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = swin_t_frozen(imgs)
    assert feat.shape == torch.Size([1, 768, 3, 2, 2])

    input_shape = (1, 3, 5, 63, 63)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = swin_t_frozen(imgs)
    assert feat.shape == torch.Size([1, 768, 3, 2, 2])

    swin_t_all_stages = SwinTransformer3D(arch='t', out_indices=(0, 1, 2, 3))
    feats = swin_t_all_stages(imgs)
    assert feats[0].shape == torch.Size([1, 96, 3, 16, 16])
    assert feats[1].shape == torch.Size([1, 192, 3, 8, 8])
    assert feats[2].shape == torch.Size([1, 384, 3, 4, 4])
    assert feats[3].shape == torch.Size([1, 768, 3, 2, 2])

    swin_t_all_stages_after_ds = SwinTransformer3D(
        arch='t', out_indices=(0, 1, 2, 3), out_after_downsample=True)
    feats = swin_t_all_stages_after_ds(imgs)
    assert feats[0].shape == torch.Size([1, 192, 3, 8, 8])
    assert feats[1].shape == torch.Size([1, 384, 3, 4, 4])
    assert feats[2].shape == torch.Size([1, 768, 3, 2, 2])
    assert feats[3].shape == torch.Size([1, 768, 3, 2, 2])
Test swin backbone.
test_swin_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_swin.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_swin.py
Apache-2.0
def test_c2d_backbone():
    """Test c2d backbone."""
    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    # c2d inference test
    c2d_r50 = C2D(depth=50)
    c2d_r50.init_weights()
    c2d_r50.train()
    feat = c2d_r50(imgs)
    assert feat.shape == torch.Size([1, 2048, 4, 2, 2])

    c2d_r101 = C2D(depth=101)
    c2d_r101.init_weights()
    c2d_r101.train()
    feat = c2d_r101(imgs)
    assert feat.shape == torch.Size([1, 2048, 4, 2, 2])
Test c2d backbone.
test_c2d_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_c2d.py
Apache-2.0
def test_mobilenetv2_tsm_backbone():
    """Test mobilenetv2_tsm backbone."""
    from mmcv.cnn import ConvModule

    from mmaction.models.backbones.mobilenet_v2 import InvertedResidual
    from mmaction.models.backbones.resnet_tsm import TemporalShift

    input_shape = (8, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    # mobilenetv2_tsm with width_mult = 1.0
    mobilenetv2_tsm = MobileNetV2TSM(pretrained='mmcls://mobilenet_v2')
    mobilenetv2_tsm.init_weights()
    for cur_module in mobilenetv2_tsm.modules():
        if isinstance(cur_module, InvertedResidual) and \
                len(cur_module.conv) == 3 and \
                cur_module.use_res_connect:
            assert isinstance(cur_module.conv[0], TemporalShift)
            assert cur_module.conv[0].num_segments == \
                mobilenetv2_tsm.num_segments
            assert cur_module.conv[0].shift_div == mobilenetv2_tsm.shift_div
            assert isinstance(cur_module.conv[0].net, ConvModule)

    # TSM-MobileNetV2 with widen_factor = 1.0 forward
    feat = mobilenetv2_tsm(imgs)
    assert feat.shape == torch.Size([8, 1280, 2, 2])

    # mobilenetv2 with widen_factor = 0.5 forward
    mobilenetv2_tsm_05 = MobileNetV2TSM(widen_factor=0.5, pretrained2d=False)
    mobilenetv2_tsm_05.init_weights()
    feat = mobilenetv2_tsm_05(imgs)
    assert feat.shape == torch.Size([8, 1280, 2, 2])

    # mobilenetv2 with widen_factor = 1.5 forward
    mobilenetv2_tsm_15 = MobileNetV2TSM(widen_factor=1.5, pretrained2d=False)
    mobilenetv2_tsm_15.init_weights()
    feat = mobilenetv2_tsm_15(imgs)
    assert feat.shape == torch.Size([8, 1920, 2, 2])
Test mobilenetv2_tsm backbone.
test_mobilenetv2_tsm_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_mobilenet_v2_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2_tsm.py
Apache-2.0
def is_norm(modules):
    """Check if is one of the norms."""
    if isinstance(modules, (GroupNorm, _BatchNorm)):
        return True
    return False
Check if is one of the norms.
test_mobilenetv2_backbone.is_norm
python
open-mmlab/mmaction2
tests/models/backbones/test_mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2.py
Apache-2.0
def is_block(modules): """Check if is ResNet building block.""" if isinstance(modules, (InvertedResidual, )): return True return False
Check if is ResNet building block.
test_mobilenetv2_backbone.is_block
python
open-mmlab/mmaction2
tests/models/backbones/test_mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2.py
Apache-2.0
def test_mobilenetv2_backbone():
    """Test MobileNetV2.

    Modified from mmclassification.
    """
    from torch.nn.modules import GroupNorm

    from mmaction.models.backbones.mobilenet_v2 import InvertedResidual

    def is_norm(modules):
        """Check if is one of the norms."""
        if isinstance(modules, (GroupNorm, _BatchNorm)):
            return True
        return False

    def is_block(modules):
        """Check if is ResNet building block."""
        if isinstance(modules, (InvertedResidual, )):
            return True
        return False

    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = MobileNetV2()
        model.init_weights(pretrained=0)

    with pytest.raises(ValueError):
        # frozen_stages must be in range(1, 9)
        MobileNetV2(frozen_stages=9)

    with pytest.raises(ValueError):
        # out_indices must be in range(-1, 8)
        MobileNetV2(out_indices=[8])

    input_shape = (1, 3, 224, 224)
    imgs = generate_backbone_demo_inputs(input_shape)

    # Test MobileNetV2 with first stage frozen
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test MobileNetV2 with all stages frozen
    frozen_stages = 8
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    for mod in model.modules():
        if not isinstance(mod, MobileNetV2):
            assert mod.training is False
        for param in mod.parameters():
            assert param.requires_grad is False

    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test MobileNetV2 forward with widen_factor=1.0, pretrained
    model = MobileNetV2(
        widen_factor=1.0,
        out_indices=range(0, 8),
        pretrained='mmcls://mobilenet_v2')
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), True)

    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with widen_factor=0.5
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))

    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0)
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert feat.shape == torch.Size((1, 2560, 7, 7))

    # Test MobileNetV2 forward with default out_indices
    model = MobileNetV2(widen_factor=1.0)
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert feat.shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with act_cfg=dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with BatchNorm (default norm_cfg) forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))

    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()

    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
Test MobileNetV2. Modified from mmclassification.
test_mobilenetv2_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2.py
Apache-2.0
def test_tanet_backbone(): """Test tanet backbone.""" with pytest.raises(NotImplementedError): # TA-Blocks are only based on Bottleneck block now tanet_18 = TANet(18, 8) tanet_18.init_weights() from mmaction.models.backbones.resnet import Bottleneck from mmaction.models.backbones.tanet import TABlock # tanet with depth 50 tanet_50 = TANet(50, 8) tanet_50.init_weights() for layer_name in tanet_50.res_layers: layer = getattr(tanet_50, layer_name) blocks = list(layer.children()) for block in blocks: assert isinstance(block, TABlock) assert isinstance(block.block, Bottleneck) assert block.tam.num_segments == block.num_segments assert block.tam.in_channels == block.block.conv1.out_channels input_shape = (8, 3, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) feat = tanet_50(imgs) assert feat.shape == torch.Size([8, 2048, 2, 2]) input_shape = (16, 3, 32, 32) imgs = generate_backbone_demo_inputs(input_shape) feat = tanet_50(imgs) assert feat.shape == torch.Size([16, 2048, 1, 1])
Test tanet backbone.
test_tanet_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_tanet.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_tanet.py
Apache-2.0
def test_resnet_backbone(): """Test resnet backbone.""" with pytest.raises(KeyError): # ResNet depth should be in [18, 34, 50, 101, 152] ResNet(20) with pytest.raises(AssertionError): # In ResNet: 1 <= num_stages <= 4 ResNet(50, num_stages=0) with pytest.raises(AssertionError): # In ResNet: 1 <= num_stages <= 4 ResNet(50, num_stages=5) with pytest.raises(AssertionError): # len(strides) == len(dilations) == num_stages ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) with pytest.raises(TypeError): # pretrain must be a str resnet50 = ResNet(50, pretrained=0) resnet50.init_weights() with pytest.raises(AssertionError): # style must be in ['pytorch', 'caffe'] ResNet(18, style='tensorflow') with pytest.raises(AssertionError): # assert not with_cp ResNet(18, with_cp=True) # resnet with depth 18, norm_eval False, initial weights resnet18 = ResNet(18) resnet18.init_weights() # resnet with depth 50, norm_eval True resnet50 = ResNet(50, norm_eval=True) resnet50.init_weights() resnet50.train() assert check_norm_state(resnet50.modules(), False) # resnet with depth 50, norm_eval True, pretrained resnet50_pretrain = ResNet( pretrained='torchvision://resnet50', depth=50, norm_eval=True) resnet50_pretrain.init_weights() resnet50_pretrain.train() assert check_norm_state(resnet50_pretrain.modules(), False) # resnet with depth 50, norm_eval True, frozen_stages 1 frozen_stages = 1 resnet50_frozen = ResNet(50, frozen_stages=frozen_stages) resnet50_frozen.init_weights() resnet50_frozen.train() assert resnet50_frozen.conv1.bn.training is False for layer in resnet50_frozen.conv1.modules(): for param in layer.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(resnet50_frozen, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # resnet with depth 50, partial batchnorm resnet_pbn = ResNet(50, partial_bn=True) resnet_pbn.train() count_bn = 0 for m in resnet_pbn.modules(): if isinstance(m, nn.BatchNorm2d): count_bn += 1 if count_bn >= 2: assert m.weight.requires_grad is False assert m.bias.requires_grad is False assert m.training is False else: assert m.weight.requires_grad is True assert m.bias.requires_grad is True assert m.training is True input_shape = (1, 3, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) # resnet with depth 18 inference resnet18 = ResNet(18, norm_eval=False) resnet18.init_weights() resnet18.train() feat = resnet18(imgs) assert feat.shape == torch.Size([1, 512, 2, 2]) # resnet with depth 50 inference resnet50 = ResNet(50, norm_eval=False) resnet50.init_weights() resnet50.train() feat = resnet50(imgs) assert feat.shape == torch.Size([1, 2048, 2, 2]) # resnet with depth 50 in caffe style inference resnet50_caffe = ResNet(50, style='caffe', norm_eval=False) resnet50_caffe.init_weights() resnet50_caffe.train() feat = resnet50_caffe(imgs) assert feat.shape == torch.Size([1, 2048, 2, 2]) resnet50_flow = ResNet( depth=50, pretrained='torchvision://resnet50', in_channels=10) input_shape = (1, 10, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) feat = resnet50_flow(imgs) assert feat.shape == torch.Size([1, 2048, 2, 2]) resnet50 = ResNet( depth=50, pretrained='torchvision://resnet50', in_channels=3) input_shape = (1, 3, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) feat = resnet50(imgs) assert feat.shape == torch.Size([1, 2048, 2, 2])
Test resnet backbone.
test_resnet_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet.py
Apache-2.0
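The partial-batchnorm loop in test_resnet_backbone asserts a specific freezing pattern: only the first BatchNorm2d keeps training and gradients. That behaviour can be expressed as a small helper (a sketch of the expected semantics under those assertions, not ResNet's actual partial_bn implementation):

import torch.nn as nn

def partial_bn_(model: nn.Module, keep_first: int = 1) -> None:
    # Put every BatchNorm2d after the first `keep_first` into eval mode
    # and stop gradient flow to its affine parameters, which is exactly
    # the state the count_bn loop above checks.
    count = 0
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            count += 1
            if count > keep_first:
                m.eval()
                m.weight.requires_grad_(False)
                m.bias.requires_grad_(False)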
def test_mobileone_tsm_backbone(): """Test MobileOne TSM backbone.""" from mmpretrain.models.backbones.mobileone import MobileOneBlock from mmaction.models.backbones.resnet_tsm import TemporalShift model = MobileOneTSM('s0', pretrained2d=False) model.init_weights() for cur_module in model.modules(): if isinstance(cur_module, TemporalShift): # TemporalShift is a wrapper of MobileOneBlock assert isinstance(cur_module.net, MobileOneBlock) assert cur_module.num_segments == model.num_segments assert cur_module.shift_div == model.shift_div inputs = generate_backbone_demo_inputs((8, 3, 64, 64)) feat = model(inputs) assert feat.shape == torch.Size([8, 1024, 2, 2]) model = MobileOneTSM('s1', pretrained2d=False) feat = model(inputs) assert feat.shape == torch.Size([8, 1280, 2, 2]) model = MobileOneTSM('s2', pretrained2d=False) feat = model(inputs) assert feat.shape == torch.Size([8, 2048, 2, 2]) model = MobileOneTSM('s3', pretrained2d=False) feat = model(inputs) assert feat.shape == torch.Size([8, 2048, 2, 2]) model = MobileOneTSM('s4', pretrained2d=False) feat = model(inputs) assert feat.shape == torch.Size([8, 2048, 2, 2])
Test MobileOne TSM backbone.
test_mobileone_tsm_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobileone_tsm.py
Apache-2.0
def test_vit_backbone(): """Test vit backbone.""" x = torch.randn(1, 3, 8, 64, 64) model = VisionTransformer( img_size=64, num_frames=8, qkv_bias=True, drop_path_rate=0.2, init_values=0.1) model.init_weights() assert model(x).shape == torch.Size([1, 768]) model.eval() assert model(x).shape == torch.Size([1, 768]) model = VisionTransformer( img_size=64, num_frames=8, use_learnable_pos_emb=True, drop_rate=0.1, use_mean_pooling=False) model.init_weights() assert model(x).shape == torch.Size([1, 768]) model.eval() assert model(x).shape == torch.Size([1, 768])
Test vit backbone.
test_vit_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_vit_mae.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_vit_mae.py
Apache-2.0
def test_resnet_csn_backbone(): """Test resnet_csn backbone.""" with pytest.raises(ValueError): # Bottleneck mode must be "ip" or "ir" ResNet3dCSN(152, None, bottleneck_mode='id') input_shape = (2, 3, 6, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) resnet3d_csn_frozen = ResNet3dCSN( 152, None, bn_frozen=True, norm_eval=True) resnet3d_csn_frozen.train() for m in resnet3d_csn_frozen.modules(): if isinstance(m, _BatchNorm): for param in m.parameters(): assert param.requires_grad is False # Interaction-preserved channel-separated bottleneck block resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip') resnet3d_csn_ip.init_weights() resnet3d_csn_ip.train() for i, layer_name in enumerate(resnet3d_csn_ip.res_layers): layers = getattr(resnet3d_csn_ip, layer_name) num_blocks = resnet3d_csn_ip.stage_blocks[i] assert len(layers) == num_blocks for layer in layers: assert isinstance(layer.conv2, nn.Sequential) assert len(layer.conv2) == 2 assert layer.conv2[1].groups == layer.planes if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_csn_ip = resnet3d_csn_ip.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_csn_ip(imgs_gpu) assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) else: feat = resnet3d_csn_ip(imgs) assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) # Interaction-reduced channel-separated bottleneck block resnet3d_csn_ir = ResNet3dCSN(152, None, bottleneck_mode='ir') resnet3d_csn_ir.init_weights() resnet3d_csn_ir.train() for i, layer_name in enumerate(resnet3d_csn_ir.res_layers): layers = getattr(resnet3d_csn_ir, layer_name) num_blocks = resnet3d_csn_ir.stage_blocks[i] assert len(layers) == num_blocks for layer in layers: assert isinstance(layer.conv2, nn.Sequential) assert len(layer.conv2) == 1 assert layer.conv2[0].groups == layer.planes if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_csn_ir = resnet3d_csn_ir.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_csn_ir(imgs_gpu) assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) else: feat = resnet3d_csn_ir(imgs) assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) # Set training status = False resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip') resnet3d_csn_ip.init_weights() resnet3d_csn_ip.train(False) for module in resnet3d_csn_ip.children(): assert module.training is False
Test resnet_csn backbone.
test_resnet_csn_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet3d_csn.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet3d_csn.py
Apache-2.0
def test_aagcn_backbone(): """Test AAGCN backbone.""" register_all_modules() mode = 'spatial' batch_size, num_person, num_frames = 2, 2, 150 # openpose-18 layout num_joints = 18 model = AAGCN(graph_cfg=dict(layout='openpose', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 18]) # nturgb+d layout num_joints = 25 model = AAGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 25]) # coco layout num_joints = 17 model = AAGCN(graph_cfg=dict(layout='coco', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 17]) # custom settings # disable the attention module to degenerate AAGCN to AGCN model = AAGCN( graph_cfg=dict(layout='coco', mode=mode), gcn_attention=False) model.init_weights() output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 17])
Test AAGCN backbone.
test_aagcn_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_aagcn.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_aagcn.py
Apache-2.0
def test_x3d_backbone():
    """Test OmniResNet backbone."""
    _ = OmniResNet()

    # dump a torchvision ResNet-50 checkpoint to initialise from
    resnet50 = torchvision.models.resnet50()
    params = resnet50.state_dict()
    torch.save(params, './r50.pth')

    model = OmniResNet(pretrain_2d='./r50.pth')

    # video input: (N, C, T, H, W)
    input_shape = (2, 3, 8, 64, 64)
    videos = generate_backbone_demo_inputs(input_shape)
    feat = model(videos)
    assert feat.shape == torch.Size([2, 2048, 8, 2, 2])

    # image input: (N, C, H, W)
    input_shape = (2, 3, 64, 64)
    images = generate_backbone_demo_inputs(input_shape)
    feat = model(images)
    assert feat.shape == torch.Size([2, 2048, 2, 2])
Test OmniResNet backbone.
test_x3d_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet_omni.py
Apache-2.0
def test_x3d_backbone(): """Test x3d backbone.""" with pytest.raises(AssertionError): # In X3D: 1 <= num_stages <= 4 X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=0) with pytest.raises(AssertionError): # In X3D: 1 <= num_stages <= 4 X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=5) with pytest.raises(AssertionError): # len(spatial_strides) == num_stages X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, spatial_strides=(1, 2), num_stages=4) with pytest.raises(AssertionError): # se_style in ['half', 'all'] X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style=None) with pytest.raises(AssertionError): # se_ratio should be None or > 0 X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style='half', se_ratio=0) # x3d_s, no pretrained, norm_eval True x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=True) x3d_s.init_weights() x3d_s.train() assert check_norm_state(x3d_s.modules(), False) # x3d_l, no pretrained, norm_eval True x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=True) x3d_l.init_weights() x3d_l.train() assert check_norm_state(x3d_l.modules(), False) # x3d_s, no pretrained, norm_eval False x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False) x3d_s.init_weights() x3d_s.train() assert check_norm_state(x3d_s.modules(), True) # x3d_l, no pretrained, norm_eval False x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=False) x3d_l.init_weights() x3d_l.train() assert check_norm_state(x3d_l.modules(), True) # x3d_s, no pretrained, frozen_stages, norm_eval False frozen_stages = 1 x3d_s_frozen = X3D( gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False, frozen_stages=frozen_stages) x3d_s_frozen.init_weights() x3d_s_frozen.train() assert x3d_s_frozen.conv1_t.bn.training is False for param in x3d_s_frozen.conv1_s.parameters(): assert param.requires_grad is False for param in x3d_s_frozen.conv1_t.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(x3d_s_frozen, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # test zero_init_residual, zero_init_residual is True by default for m in x3d_s_frozen.modules(): if hasattr(m, 'conv3'): assert torch.equal(m.conv3.bn.weight, torch.zeros_like(m.conv3.bn.weight)) assert torch.equal(m.conv3.bn.bias, torch.zeros_like(m.conv3.bn.bias)) # x3d_s inference input_shape = (1, 3, 13, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): x3d_s_frozen = x3d_s_frozen.cuda() imgs_gpu = imgs.cuda() feat = x3d_s_frozen(imgs_gpu) assert feat.shape == torch.Size([1, 432, 13, 2, 2]) else: feat = x3d_s_frozen(imgs) assert feat.shape == torch.Size([1, 432, 13, 2, 2]) # x3d_m inference input_shape = (1, 3, 16, 96, 96) imgs = generate_backbone_demo_inputs(input_shape) # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): x3d_s_frozen = x3d_s_frozen.cuda() imgs_gpu = imgs.cuda() feat = x3d_s_frozen(imgs_gpu) assert feat.shape == torch.Size([1, 432, 16, 3, 3]) else: feat = x3d_s_frozen(imgs) assert feat.shape == torch.Size([1, 432, 16, 3, 3])
Test x3d backbone.
test_x3d_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_x3d.py
Apache-2.0
def test_c3d_backbone(): """Test c3d backbone.""" input_shape = (1, 3, 16, 24, 24) imgs = generate_backbone_demo_inputs(input_shape) # c3d inference test c3d = C3D(out_dim=512) c3d.init_weights() c3d.train() feat = c3d(imgs) assert feat.shape == torch.Size([1, 4096]) # c3d with bn inference test c3d_bn = C3D(out_dim=512, norm_cfg=dict(type='BN3d')) c3d_bn.init_weights() c3d_bn.train() feat = c3d_bn(imgs) assert feat.shape == torch.Size([1, 4096])
Test c3d backbone.
test_c3d_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_c3d.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_c3d.py
Apache-2.0
def test_slowonly_backbone(): """Test SlowOnly backbone.""" with pytest.raises(AssertionError): # SlowOnly should contain no lateral connection ResNet3dSlowOnly(depth=50, pretrained=None, lateral=True) # test SlowOnly for PoseC3D so_50 = ResNet3dSlowOnly( depth=50, pretrained=None, in_channels=17, base_channels=32, num_stages=3, out_indices=(2, ), stage_blocks=(4, 6, 3), conv1_stride_s=1, pool1_stride_s=1, inflate=(0, 1, 1), spatial_strides=(2, 2, 2), temporal_strides=(1, 1, 2), dilations=(1, 1, 1)) so_50.init_weights() so_50.train() # test SlowOnly with normal config so_50 = ResNet3dSlowOnly(depth=50, pretrained=None) so_50.init_weights() so_50.train() # SlowOnly inference test input_shape = (1, 3, 8, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): so_50 = so_50.cuda() imgs_gpu = imgs.cuda() feat = so_50(imgs_gpu) else: feat = so_50(imgs) assert feat.shape == torch.Size([1, 2048, 8, 2, 2])
Test SlowOnly backbone.
test_slowonly_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet3d_slowonly.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet3d_slowonly.py
Apache-2.0
def test_uniformer_backbone(): """Test uniformer backbone.""" input_shape = (1, 3, 16, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) model = UniFormer( depth=[3, 4, 8, 3], embed_dim=[64, 128, 320, 512], head_dim=64, drop_path_rate=0.1) model.init_weights() model.eval() assert model(imgs).shape == torch.Size([1, 512, 8, 2, 2])
Test uniformer backbone.
test_uniformer_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_uniformer.py
Apache-2.0
def test_resnet3d_backbone(): """Test resnet3d backbone.""" with pytest.raises(AssertionError): # In ResNet3d: 1 <= num_stages <= 4 ResNet3d(34, None, num_stages=0) with pytest.raises(AssertionError): # In ResNet3d: 1 <= num_stages <= 4 ResNet3d(34, None, num_stages=5) with pytest.raises(AssertionError): # In ResNet3d: 1 <= num_stages <= 4 ResNet3d(50, None, num_stages=0) with pytest.raises(AssertionError): # In ResNet3d: 1 <= num_stages <= 4 ResNet3d(50, None, num_stages=5) with pytest.raises(AssertionError): # len(spatial_strides) == len(temporal_strides) # == len(dilations) == num_stages ResNet3d( 50, None, spatial_strides=(1, ), temporal_strides=(1, 1), dilations=(1, 1, 1), num_stages=4) with pytest.raises(AssertionError): # len(spatial_strides) == len(temporal_strides) # == len(dilations) == num_stages ResNet3d( 34, None, spatial_strides=(1, ), temporal_strides=(1, 1), dilations=(1, 1, 1), num_stages=4) with pytest.raises(TypeError): # pretrain must be str or None. resnet3d_34 = ResNet3d(34, ['resnet', 'bninception']) resnet3d_34.init_weights() with pytest.raises(TypeError): # pretrain must be str or None. resnet3d_50 = ResNet3d(50, ['resnet', 'bninception']) resnet3d_50.init_weights() # resnet3d with depth 34, no pretrained, norm_eval True resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True) resnet3d_34.init_weights() resnet3d_34.train() assert check_norm_state(resnet3d_34.modules(), False) # resnet3d with depth 50, no pretrained, norm_eval True resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True) resnet3d_50.init_weights() resnet3d_50.train() assert check_norm_state(resnet3d_50.modules(), False) # resnet3d with depth 50, pretrained2d, norm_eval True resnet3d_50_pretrain = ResNet3d( 50, 'torchvision://resnet50', norm_eval=True) resnet3d_50_pretrain.init_weights() resnet3d_50_pretrain.train() assert check_norm_state(resnet3d_50_pretrain.modules(), False) from mmengine.runner.checkpoint import _load_checkpoint chkp_2d = _load_checkpoint('torchvision://resnet50') for name, module in resnet3d_50_pretrain.named_modules(): if len(name.split('.')) == 4: # layer.block.module.submodule prefix = name.split('.')[:2] module_type = name.split('.')[2] submodule_type = name.split('.')[3] if module_type == 'downsample': name2d = name.replace('conv', '0').replace('bn', '1') else: layer_id = name.split('.')[2][-1] name2d = prefix[0] + '.' + prefix[1] + '.' + \ submodule_type + layer_id if isinstance(module, nn.Conv3d): conv2d_weight = chkp_2d[name2d + '.weight'] conv3d_weight = getattr(module, 'weight').data assert torch.equal( conv3d_weight, conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) / conv3d_weight.shape[2]) if getattr(module, 'bias') is not None: conv2d_bias = chkp_2d[name2d + '.bias'] conv3d_bias = getattr(module, 'bias').data assert torch.equal(conv2d_bias, conv3d_bias) elif isinstance(module, nn.BatchNorm3d): for pname in ['weight', 'bias', 'running_mean', 'running_var']: param_2d = chkp_2d[name2d + '.' 
+ pname] param_3d = getattr(module, pname).data assert torch.equal(param_2d, param_3d) conv3d = resnet3d_50_pretrain.conv1.conv assert torch.equal( conv3d.weight, chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) / conv3d.weight.shape[2]) conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv assert torch.equal( conv3d.weight, chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as( conv3d.weight) / conv3d.weight.shape[2]) # resnet3d with depth 34, no pretrained, norm_eval False resnet3d_34_no_bn_eval = ResNet3d( 34, None, pretrained2d=False, norm_eval=False) resnet3d_34_no_bn_eval.init_weights() resnet3d_34_no_bn_eval.train() assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True) # resnet3d with depth 50, no pretrained, norm_eval False resnet3d_50_no_bn_eval = ResNet3d( 50, None, pretrained2d=False, norm_eval=False) resnet3d_50_no_bn_eval.init_weights() resnet3d_50_no_bn_eval.train() assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True) # resnet3d with depth 34, no pretrained, frozen_stages, norm_eval False frozen_stages = 1 resnet3d_34_frozen = ResNet3d( 34, None, pretrained2d=False, frozen_stages=frozen_stages) resnet3d_34_frozen.init_weights() resnet3d_34_frozen.train() assert resnet3d_34_frozen.conv1.bn.training is False for param in resnet3d_34_frozen.conv1.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(resnet3d_34_frozen, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # test zero_init_residual for m in resnet3d_34_frozen.modules(): if hasattr(m, 'conv2'): assert torch.equal(m.conv2.bn.weight, torch.zeros_like(m.conv2.bn.weight)) assert torch.equal(m.conv2.bn.bias, torch.zeros_like(m.conv2.bn.bias)) # resnet3d with depth 50, no pretrained, frozen_stages, norm_eval False frozen_stages = 1 resnet3d_50_frozen = ResNet3d( 50, None, pretrained2d=False, frozen_stages=frozen_stages) resnet3d_50_frozen.init_weights() resnet3d_50_frozen.train() assert resnet3d_50_frozen.conv1.bn.training is False for param in resnet3d_50_frozen.conv1.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(resnet3d_50_frozen, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # test zero_init_residual for m in resnet3d_50_frozen.modules(): if hasattr(m, 'conv3'): assert torch.equal(m.conv3.bn.weight, torch.zeros_like(m.conv3.bn.weight)) assert torch.equal(m.conv3.bn.bias, torch.zeros_like(m.conv3.bn.bias)) # resnet3d frozen with depth 34 inference input_shape = (1, 3, 6, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_34_frozen = resnet3d_34_frozen.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_34_frozen(imgs_gpu) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) else: feat = resnet3d_34_frozen(imgs) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) # resnet3d with depth 50 inference input_shape = (1, 3, 6, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_50_frozen = resnet3d_50_frozen.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_50_frozen(imgs_gpu) assert feat.shape == torch.Size([1, 
2048, 3, 2, 2]) else: feat = resnet3d_50_frozen(imgs) assert feat.shape == torch.Size([1, 2048, 3, 2, 2]) # resnet3d with depth 50 in caffe style inference resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe') resnet3d_50_caffe.init_weights() resnet3d_50_caffe.train() # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_50_caffe = resnet3d_50_caffe.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_50_caffe(imgs_gpu) assert feat.shape == torch.Size([1, 2048, 3, 2, 2]) else: feat = resnet3d_50_caffe(imgs) assert feat.shape == torch.Size([1, 2048, 3, 2, 2]) # resnet3d with depth 34 in caffe style inference resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe') resnet3d_34_caffe.init_weights() resnet3d_34_caffe.train() # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_34_caffe = resnet3d_34_caffe.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_34_caffe(imgs_gpu) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) else: feat = resnet3d_34_caffe(imgs) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) # resnet3d with depth with 3x3x3 inflate_style inference resnet3d_50_1x1x1 = ResNet3d( 50, None, pretrained2d=False, inflate_style='3x3x3') resnet3d_50_1x1x1.init_weights() resnet3d_50_1x1x1.train() # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_50_1x1x1 = resnet3d_50_1x1x1.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_50_1x1x1(imgs_gpu) assert feat.shape == torch.Size([1, 2048, 3, 2, 2]) else: feat = resnet3d_50_1x1x1(imgs) assert feat.shape == torch.Size([1, 2048, 3, 2, 2]) resnet3d_34_1x1x1 = ResNet3d( 34, None, pretrained2d=False, inflate_style='3x3x3') resnet3d_34_1x1x1.init_weights() resnet3d_34_1x1x1.train() # parrots 3dconv is only implemented on gpu if torch.__version__ == 'parrots': if torch.cuda.is_available(): resnet3d_34_1x1x1 = resnet3d_34_1x1x1.cuda() imgs_gpu = imgs.cuda() feat = resnet3d_34_1x1x1(imgs_gpu) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) else: feat = resnet3d_34_1x1x1(imgs) assert feat.shape == torch.Size([1, 512, 3, 2, 2]) # resnet3d with non-local module non_local_cfg = dict( sub_sample=True, use_scale=False, norm_cfg=dict(type='BN3d', requires_grad=True), mode='embedded_gaussian') non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)) resnet3d_nonlocal = ResNet3d( 50, None, pretrained2d=False, non_local=non_local, non_local_cfg=non_local_cfg) resnet3d_nonlocal.init_weights() for layer_name in ['layer2', 'layer3']: layer = getattr(resnet3d_nonlocal, layer_name) for i, _ in enumerate(layer): if i % 2 == 0: assert hasattr(layer[i], 'non_local_block') feat = resnet3d_nonlocal(imgs) assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
Test resnet3d backbone.
test_resnet3d_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet3d.py
Apache-2.0
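The name2d construction in test_resnet3d_backbone implements a small renaming rule between inflated ResNet3d modules and the torchvision 2D checkpoint. Pulled out as a standalone function for clarity (map_3d_to_2d_key is a hypothetical helper name; the rule is the one the test walks):

def map_3d_to_2d_key(name3d: str) -> str:
    # 'layerL.B.convK.conv' -> 'layerL.B.convK', 'layerL.B.convK.bn' ->
    # 'layerL.B.bnK', and downsample branches map conv -> 0 and bn -> 1,
    # since torchvision stores downsample as an indexed nn.Sequential.
    layer, block, module, sub = name3d.split('.')
    if module == 'downsample':
        return name3d.replace('conv', '0').replace('bn', '1')
    return f'{layer}.{block}.{sub}{module[-1]}'

assert map_3d_to_2d_key('layer1.0.conv1.conv') == 'layer1.0.conv1'
assert map_3d_to_2d_key('layer1.0.conv1.bn') == 'layer1.0.bn1'
assert map_3d_to_2d_key('layer1.0.downsample.conv') == 'layer1.0.downsample.0'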
def test_stgcn_backbone(): """Test STGCN backbone.""" mode = 'stgcn_spatial' batch_size, num_person, num_frames = 2, 2, 150 # openpose-18 layout num_joints = 18 model = STGCN(graph_cfg=dict(layout='openpose', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 18]) # nturgb+d layout num_joints = 25 model = STGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 25]) # coco layout num_joints = 17 model = STGCN(graph_cfg=dict(layout='coco', mode=mode)) model.init_weights() inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 17]) # custom settings # instantiate STGCN++ model = STGCN( graph_cfg=dict(layout='coco', mode='spatial'), gcn_adaptive='init', gcn_with_res=True, tcn_type='mstcn') model.init_weights() output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 17])
Test STGCN backbone.
test_stgcn_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_stgcn.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_stgcn.py
Apache-2.0
def test_uniformerv2_backbone():
    """Test UniFormerV2 backbone."""
    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    model = UniFormerV2(
        input_resolution=64,
        patch_size=16,
        width=768,
        layers=12,
        heads=12,
        t_size=8,
        dw_reduction=1.5,
        backbone_drop_path_rate=0.,
        temporal_downsample=False,
        no_lmhra=True,
        double_lmhra=True,
        return_list=[8, 9, 10, 11],
        n_layers=4,
        n_dim=768,
        n_head=12,
        mlp_factor=4.,
        drop_path_rate=0.,
        clip_pretrained=False,
        mlp_dropout=[0.5, 0.5, 0.5, 0.5])
    model.init_weights()
    model.eval()
    assert model(imgs).shape == torch.Size([1, 768])

    # Something-Something style config (temporal downsampling enabled)
    input_shape = (1, 3, 16, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    model = UniFormerV2(
        input_resolution=64,
        patch_size=16,
        width=768,
        layers=12,
        heads=12,
        t_size=16,
        dw_reduction=1.5,
        backbone_drop_path_rate=0.,
        temporal_downsample=True,
        no_lmhra=False,
        double_lmhra=True,
        return_list=[8, 9, 10, 11],
        n_layers=4,
        n_dim=768,
        n_head=12,
        mlp_factor=4.,
        drop_path_rate=0.,
        clip_pretrained=False,
        mlp_dropout=[0.5, 0.5, 0.5, 0.5])
    model.init_weights()
    model.eval()
    assert model(imgs).shape == torch.Size([1, 768])
Test UniFormerV2 backbone.
test_uniformerv2_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_uniformerv2.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_uniformerv2.py
Apache-2.0
def test_resnet_audio_backbone(): """Test ResNetAudio backbone.""" input_shape = (1, 1, 16, 16) spec = generate_backbone_demo_inputs(input_shape) # inference register_all_modules() audioonly = ResNetAudio(50, None) audioonly.init_weights() audioonly.train() feat = audioonly(spec) assert feat.shape == torch.Size([1, 1024, 2, 2])
Test ResNetAudio backbone.
test_resnet_audio_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet_audio.py
Apache-2.0
def test_slowfast_backbone(): """Test SlowFast backbone.""" with pytest.raises(TypeError): # cfg should be a dict ResNet3dSlowFast(slow_pathway=list(['foo', 'bar'])) with pytest.raises(KeyError): # pathway type should be implemented ResNet3dSlowFast(slow_pathway=dict(type='resnext')) # test slowfast with slow inflated sf_50_inflate = ResNet3dSlowFast( slow_pathway=dict( type='resnet3d', depth=50, pretrained='torchvision://resnet50', pretrained2d=True, lateral=True, conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))) sf_50_inflate.init_weights() sf_50_inflate.train() # test slowfast with no lateral connection sf_50_wo_lateral = ResNet3dSlowFast( None, slow_pathway=dict( type='resnet3d', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))) sf_50_wo_lateral.init_weights() sf_50_wo_lateral.train() # slowfast w/o lateral connection inference test input_shape = (1, 3, 8, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) feat = sf_50_wo_lateral(imgs) assert isinstance(feat, tuple) assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2]) assert feat[1].shape == torch.Size([1, 256, 8, 2, 2]) # test slowfast with frozen stages config frozen_slow = 3 sf_50 = ResNet3dSlowFast( None, slow_pathway=dict( type='resnet3d', depth=50, pretrained=None, pretrained2d=True, lateral=True, conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), frozen_stages=frozen_slow)) sf_50.init_weights() sf_50.train() for stage in range(1, sf_50.slow_path.num_stages): lateral_name = sf_50.slow_path.lateral_connections[stage - 1] conv_lateral = getattr(sf_50.slow_path, lateral_name) for mod in conv_lateral.modules(): if isinstance(mod, _BatchNorm): if stage <= frozen_slow: assert mod.training is False else: assert mod.training is True for param in conv_lateral.parameters(): if stage <= frozen_slow: assert param.requires_grad is False else: assert param.requires_grad is True # test slowfast with normal config sf_50 = ResNet3dSlowFast() sf_50.init_weights() sf_50.train() # slowfast inference test input_shape = (1, 3, 8, 64, 64) imgs = generate_backbone_demo_inputs(input_shape) feat = sf_50(imgs) assert isinstance(feat, tuple) assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2]) assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
Test SlowFast backbone.
test_slowfast_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet3d_slowfast.py
Apache-2.0
def test_rgbposeconv3d(): """Test RGBPoseConv3D backbone.""" with pytest.raises(AssertionError): RGBPoseConv3D(pose_drop_path=1.1, rgb_drop_path=1.1) rgbposec3d = RGBPoseConv3D() rgbposec3d.init_weights() rgbposec3d.train() imgs_shape = (1, 3, 8, 224, 224) heatmap_imgs_shape = (1, 17, 32, 56, 56) imgs = generate_backbone_demo_inputs(imgs_shape) heatmap_imgs = generate_backbone_demo_inputs(heatmap_imgs_shape) (x_rgb, x_pose) = rgbposec3d(imgs, heatmap_imgs) assert x_rgb.shape == torch.Size([1, 2048, 8, 7, 7]) assert x_pose.shape == torch.Size([1, 512, 32, 7, 7])
Test RGBPoseConv3D backbone.
test_rgbposeconv3d
python
open-mmlab/mmaction2
tests/models/backbones/test_rgbposeconv3d.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_rgbposeconv3d.py
Apache-2.0
def test_resnet_tin_backbone(): """Test resnet_tin backbone.""" with pytest.raises(AssertionError): # num_segments should be positive resnet_tin = ResNetTIN(50, num_segments=-1) resnet_tin.init_weights() from mmaction.models.backbones.resnet_tin import (CombineNet, TemporalInterlace) # resnet_tin with normal config resnet_tin = ResNetTIN(50) resnet_tin.init_weights() for layer_name in resnet_tin.res_layers: layer = getattr(resnet_tin, layer_name) blocks = list(layer.children()) for block in blocks: assert isinstance(block.conv1.conv, CombineNet) assert isinstance(block.conv1.conv.net1, TemporalInterlace) assert ( block.conv1.conv.net1.num_segments == resnet_tin.num_segments) assert block.conv1.conv.net1.shift_div == resnet_tin.shift_div # resnet_tin with partial batchnorm resnet_tin_pbn = ResNetTIN(50, partial_bn=True) resnet_tin_pbn.train() count_bn = 0 for m in resnet_tin_pbn.modules(): if isinstance(m, nn.BatchNorm2d): count_bn += 1 if count_bn >= 2: assert m.training is False assert m.weight.requires_grad is False assert m.bias.requires_grad is False else: assert m.training is True assert m.weight.requires_grad is True assert m.bias.requires_grad is True input_shape = (8, 3, 64, 64) imgs = generate_backbone_demo_inputs(input_shape).cuda() resnet_tin = resnet_tin.cuda() # resnet_tin with normal cfg inference feat = resnet_tin(imgs) assert feat.shape == torch.Size([8, 2048, 2, 2])
Test resnet_tin backbone.
test_resnet_tin_backbone
python
open-mmlab/mmaction2
tests/models/backbones/test_resnet_tin.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_resnet_tin.py
Apache-2.0
def test_tpn(): """Test TPN backbone.""" tpn_cfg = dict( in_channels=(1024, 2048), out_channels=1024, spatial_modulation_cfg=dict( in_channels=(1024, 2048), out_channels=2048), temporal_modulation_cfg=dict(downsample_scales=(8, 8)), upsample_cfg=dict(scale_factor=(1, 1, 1)), downsample_cfg=dict(downsample_scale=(1, 1, 1)), level_fusion_cfg=dict( in_channels=(1024, 1024), mid_channels=(1024, 1024), out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1))), aux_head_cfg=dict(out_channels=400, loss_weight=0.5)) with pytest.raises(AssertionError): tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cfg_['in_channels'] = list(tpn_cfg_['in_channels']) TPN(**tpn_cfg_) with pytest.raises(AssertionError): tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cfg_['out_channels'] = float(tpn_cfg_['out_channels']) TPN(**tpn_cfg_) with pytest.raises(AssertionError): tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cfg_['downsample_cfg']['downsample_position'] = 'unsupport' TPN(**tpn_cfg_) for k in tpn_cfg: if not k.endswith('_cfg'): continue tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cfg_[k] = list() with pytest.raises(AssertionError): TPN(**tpn_cfg_) with pytest.raises(ValueError): tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cfg_['flow_type'] = 'unsupport' TPN(**tpn_cfg_) target_shape = (32, 1) target_ = generate_backbone_demo_inputs(target_shape).long().squeeze() x0_shape = (32, 1024, 1, 4, 4) x1_shape = (32, 2048, 1, 2, 2) x0 = generate_backbone_demo_inputs(x0_shape) x1 = generate_backbone_demo_inputs(x1_shape) x = [x0, x1] # ResNetTPN with 'cascade' flow_type tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_cascade = TPN(**tpn_cfg_) target = get_label(target_) feat, loss_aux = tpn_cascade(x, target) assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) assert len(loss_aux) == 1 # ResNetTPN with 'parallel' flow_type tpn_cfg_ = copy.deepcopy(tpn_cfg) tpn_parallel = TPN(flow_type='parallel', **tpn_cfg_) target = get_label(target_) feat, loss_aux = tpn_parallel(x, target) assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) assert len(loss_aux) == 1 # ResNetTPN with 'cascade' flow_type and target is None feat, loss_aux = tpn_cascade(x, None) assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) assert len(loss_aux) == 0 # ResNetTPN with 'parallel' flow_type and target is None feat, loss_aux = tpn_parallel(x, None) assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) assert len(loss_aux) == 0
Test TPN backbone.
test_tpn
python
open-mmlab/mmaction2
tests/models/necks/test_tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/necks/test_tpn.py
Apache-2.0
def test_fbo_head(): """Test layer construction, attributes and forward function in fbo head.""" lfb_prefix_path = osp.normpath( osp.join(osp.dirname(__file__), '../../data/lfb')) st_feat_shape = (1, 16, 1, 8, 8) st_feat = torch.rand(st_feat_shape) rois = torch.randn(1, 5) rois[0][0] = 0 img_metas = [dict(img_key='video_1, 930')] # non local fbo fbo_head = FBOHead( lfb_cfg=dict( lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes=('unittest'), device='cpu'), fbo_cfg=dict( type='non_local', st_feat_channels=16, lt_feat_channels=16, latent_channels=8, num_st_feat=1, num_lt_feat=5 * 60, )) fbo_head.init_weights() out = fbo_head(st_feat, rois, img_metas) assert out.shape == (1, 24, 1, 1, 1) # avg fbo fbo_head = FBOHead( lfb_cfg=dict( lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes=('unittest'), device='cpu'), fbo_cfg=dict(type='avg')) fbo_head.init_weights() out = fbo_head(st_feat, rois, img_metas) assert out.shape == (1, 32, 1, 1, 1) # max fbo fbo_head = FBOHead( lfb_cfg=dict( lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes=('unittest'), device='cpu'), fbo_cfg=dict(type='max')) fbo_head.init_weights() out = fbo_head(st_feat, rois, img_metas) assert out.shape == (1, 32, 1, 1, 1)
Test layer construction, attributes and forward function in fbo head.
test_fbo_head
python
open-mmlab/mmaction2
tests/models/roi_heads/test_fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/roi_heads/test_fbo_head.py
Apache-2.0
def test_bbox_head_ava(): """Test loss method, layer construction, attributes and forward function in bbox head.""" with pytest.raises(TypeError): # topk must be None, int or tuple[int] BBoxHeadAVA(background_class=True, topk=0.1) with pytest.raises(AssertionError): # topk should be smaller than num_classes BBoxHeadAVA(background_class=True, num_classes=5, topk=(3, 5)) bbox_head = BBoxHeadAVA( background_class=True, in_channels=10, num_classes=4, topk=1) input = torch.randn([3, 10, 2, 2, 2]) ret = bbox_head(input) assert ret.shape == (3, 4) cls_score = torch.tensor( [[0.568, -0.162, 0.273, -0.390, 0.447, 0.102, -0.409], [2.388, 0.609, 0.369, 1.630, -0.808, -0.212, 0.296], [0.252, -0.533, -0.644, -0.591, 0.148, 0.963, -0.525], [0.134, -0.311, -0.764, -0.752, 0.656, -1.517, 0.185]]) # Test topk_to_matrix() assert torch.equal( BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 1), torch.tensor([[0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0]], dtype=bool)) assert torch.equal( BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 2), torch.tensor([[0, 1, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 0, 1]], dtype=bool)) assert torch.equal( BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 3), torch.tensor([[0, 1, 0, 1, 1, 0], [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 1]], dtype=bool)) assert torch.equal( BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 6), torch.ones([4, 6], dtype=bool)) # Test Multi-Label Loss bbox_head = BBoxHeadAVA( background_class=True) # Why is this here? isn't this redundant? bbox_head.init_weights() bbox_head = BBoxHeadAVA( background_class=True, temporal_pool_type='max', spatial_pool_type='avg') bbox_head.init_weights() # test without background class """ losses = bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=labels, label_weights=label_weights) assert torch.isclose(losses['loss_action_cls'], torch.tensor(0.7162495)) assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.6666666)) assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.4791665)) assert torch.isclose(losses['recall@top3'], torch.tensor(0.75)) assert torch.isclose(losses['prec@top3'], torch.tensor(0.5)) assert torch.isclose(losses['recall@top5'], torch.tensor(1.0)) assert torch.isclose(losses['prec@top5'], torch.tensor(0.45)) # Test Single-Label Loss bbox_head = BBoxHeadAVA(multilabel=False) losses = bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=labels, label_weights=label_weights) assert torch.isclose(losses['loss_action_cls'], torch.tensor(1.639561)) assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.25)) assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.25)) assert torch.isclose(losses['recall@top3'], torch.tensor(0.75)) assert torch.isclose(losses['prec@top3'], torch.tensor(0.5)) assert torch.isclose(losses['recall@top5'], torch.tensor(1.0)) assert torch.isclose(losses['prec@top5'], torch.tensor(0.45)) # Test ROI rois = torch.tensor([[0.0, 0.1, 0.2, 0.3, 0.4], [0.0, 0.5, 0.6, 0.7, 0.8]]) rois[1::2] *= 380 rois[2::2] *= 220 crop_quadruple = np.array([0.1, 0.2, 0.8, 0.7]) cls_score = torch.tensor([0.995, 0.728]) img_shape = (320, 480) flip = True bbox_head = BBoxHeadAVA(multilabel=True) bboxes, scores = bbox_head.get_det_bboxes( rois=rois, cls_score=cls_score, img_shape=img_shape, flip=flip, crop_quadruple=crop_quadruple) assert torch.all( torch.isclose( bboxes, torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500], [0.45499998, 0.69875002, 0.58166665, 0.86499995]]))) 
assert torch.all( torch.isclose(scores, torch.tensor([0.73007441, 0.67436624]))) bbox_head = BBoxHeadAVA(multilabel=False) bboxes, scores = bbox_head.get_det_bboxes( rois=rois, cls_score=cls_score, img_shape=img_shape, flip=flip, crop_quadruple=crop_quadruple) assert torch.all( torch.isclose( bboxes, torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500], [0.45499998, 0.69875002, 0.58166665, 0.86499995]]))) assert torch.all(torch.isclose(scores, torch.tensor([0.56636, 0.43364]))) """
Test loss method, layer construction, attributes and forward function in bbox head.
test_bbox_head_ava
python
open-mmlab/mmaction2
tests/models/roi_heads/test_bbox_heads.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/models/roi_heads/test_bbox_heads.py
Apache-2.0
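The topk_to_matrix assertions above pin down a simple contract: one boolean row per sample with True at the k highest-scoring classes. An equivalent few-line implementation (a sketch consistent with those asserts, not the BBoxHeadAVA source):

import torch

def topk_to_matrix(scores: torch.Tensor, k: int) -> torch.Tensor:
    # One-hot encode the top-k classes of each row as a bool matrix.
    topk_idx = scores.topk(k, dim=1).indices
    return torch.zeros_like(scores, dtype=torch.bool).scatter(1, topk_idx, True)

scores = torch.tensor([[0.1, 0.9, 0.5]])
assert topk_to_matrix(scores, 2).tolist() == [[False, True, True]]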
def check_flip(origin_imgs, result_imgs, flip_type): """Check if the origin_imgs are flipped correctly into result_imgs in different flip_types.""" n, _, _, _ = np.shape(origin_imgs) if flip_type == 'horizontal': for i in range(n): if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])): return False else: # yapf: disable for i in range(n): if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501 return False # yapf: enable return True
Check if the origin_imgs are flipped correctly into result_imgs in different flip_types.
check_flip
python
open-mmlab/mmaction2
tests/datasets/transforms/test_wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_wrappers.py
Apache-2.0
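The transpose-fliplr-transpose chain in the vertical branch of check_flip is just np.flipud written without assuming a flip axis; the equivalence is easy to verify on a random HWC frame:

import numpy as np

img = np.random.randint(0, 256, (4, 6, 3), dtype=np.uint8)  # one HWC frame
via_transpose = np.transpose(
    np.fliplr(np.transpose(img, (1, 0, 2))), (1, 0, 2))
assert np.array_equal(via_transpose, np.flipud(img))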
def setup_class(cls): cls.data_prefix = osp.normpath( osp.join(osp.dirname(__file__), '../../data')) cls.img_path = osp.join(cls.data_prefix, 'test.jpg') cls.video_path = osp.join(cls.data_prefix, 'test.mp4') cls.wav_path = osp.join(cls.data_prefix, 'test.wav') cls.audio_spec_path = osp.join(cls.data_prefix, 'test.npy') cls.img_dir = osp.join(cls.data_prefix, 'imgs') cls.raw_feature_dir = osp.join(cls.data_prefix, 'activitynet_features') cls.bsp_feature_dir = osp.join(cls.data_prefix, 'bsp_features') cls.proposals_dir = osp.join(cls.data_prefix, 'proposals') cls.total_frames = 5 cls.filename_tmpl = 'img_{:05}.jpg' cls.flow_filename_tmpl = '{}_{:05d}.jpg' video_total_frames = len(mmcv.VideoReader(cls.video_path)) cls.audio_total_frames = video_total_frames cls.video_results = dict( filename=cls.video_path, label=1, total_frames=video_total_frames, start_index=0) cls.audio_results = dict( audios=np.random.randn(1280, ), audio_path=cls.wav_path, total_frames=cls.audio_total_frames, label=1, start_index=0) cls.audio_feature_results = dict( audios=np.random.randn(128, 80), audio_path=cls.audio_spec_path, total_frames=cls.audio_total_frames, label=1, start_index=0) cls.frame_results = dict( frame_dir=cls.img_dir, total_frames=cls.total_frames, filename_tmpl=cls.filename_tmpl, start_index=1, modality='RGB', offset=0, label=1) cls.flow_frame_results = dict( frame_dir=cls.img_dir, total_frames=cls.total_frames, filename_tmpl=cls.flow_filename_tmpl, modality='Flow', offset=0, label=1) cls.action_results = dict( video_name='v_test1', data_prefix=cls.raw_feature_dir, temporal_scale=5, boundary_ratio=0.1, duration_second=10, duration_frame=10, feature_frame=8, annotations=[{ 'segment': [3.0, 5.0], 'label': 'Rock climbing' }]) """ from mmaction.datasets.ssn_dataset import SSNInstance cls.proposal_results = dict( frame_dir=cls.img_dir, video_id='imgs', total_frames=cls.total_frames, filename_tmpl=cls.filename_tmpl, start_index=1, out_proposals=[[['imgs', SSNInstance(1, 4, 10, 1, 1, 1)], 0], [['imgs', SSNInstance(2, 5, 10, 2, 1, 1)], 0]]) """ cls.ava_results = dict( fps=30, timestamp=902, timestamp_start=840, shot_info=(0, 27000)) cls.hvu_label_example1 = dict( categories=['action', 'object', 'scene', 'concept'], category_nums=[2, 5, 3, 2], label=dict(action=[0], object=[2, 3], scene=[0, 1])) cls.hvu_label_example2 = dict( categories=['action', 'object', 'scene', 'concept'], category_nums=[2, 5, 3, 2], label=dict(action=[1], scene=[1, 2], concept=[1]))
from mmaction.datasets.ssn_dataset import SSNInstance cls.proposal_results = dict( frame_dir=cls.img_dir, video_id='imgs', total_frames=cls.total_frames, filename_tmpl=cls.filename_tmpl, start_index=1, out_proposals=[[['imgs', SSNInstance(1, 4, 10, 1, 1, 1)], 0], [['imgs', SSNInstance(2, 5, 10, 2, 1, 1)], 0]])
setup_class
python
open-mmlab/mmaction2
tests/datasets/transforms/test_sampling.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_sampling.py
Apache-2.0
def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1):
    """Check if the result_bbox corresponds to result_imgs."""

    def check_single_crop(origin_imgs, result_imgs, result_bbox):
        result_img_shape = result_imgs[0].shape[:2]
        crop_w = result_bbox[2] - result_bbox[0]
        crop_h = result_bbox[3] - result_bbox[1]
        crop_shape = (crop_h, crop_w)
        if not crop_shape == result_img_shape:
            return False
        left, top, right, bottom = result_bbox
        return np.array_equal(
            np.array(origin_imgs)[:, top:bottom, left:right, :],
            np.array(result_imgs))

    if result_bbox.ndim == 1:
        return check_single_crop(origin_imgs, result_imgs, result_bbox)
    if result_bbox.ndim == 2:
        num_batch = len(origin_imgs)
        for i, bbox in enumerate(result_bbox):
            if num_crops == 10:
                if (i // num_batch) % 2 == 0:
                    flag = check_single_crop([origin_imgs[i % num_batch]],
                                             [result_imgs[i]], bbox)
                else:
                    flag = check_single_crop([origin_imgs[i % num_batch]],
                                             [np.flip(result_imgs[i], axis=1)],
                                             bbox)
            else:
                flag = check_single_crop([origin_imgs[i % num_batch]],
                                         [result_imgs[i]], bbox)
            if not flag:
                return False
        return True
    else:
        # bbox has an unexpected number of dimensions
        return False
Check if the result_bbox corresponds to result_imgs.
check_crop
python
open-mmlab/mmaction2
tests/datasets/transforms/test_processing.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_processing.py
Apache-2.0
def check_flip(origin_imgs, result_imgs, flip_type): """Check if the origin_imgs are flipped correctly into result_imgs in different flip_types.""" n, _, _, _ = np.shape(origin_imgs) if flip_type == 'horizontal': for i in range(n): if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])): return False else: # yapf: disable for i in range(n): if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501 return False # yapf: enable return True
Check if the origin_imgs are flipped correctly into result_imgs in different flip_types.
check_flip
python
open-mmlab/mmaction2
tests/datasets/transforms/test_processing.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_processing.py
Apache-2.0
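A minimal usage sketch for check_flip above with deterministic toy frames (illustrative only). np.fliplr matches the 'horizontal' branch and np.flipud matches the transpose-based vertical branch; note that the helper treats any non-'horizontal' flip_type as vertical:

import numpy as np

origin_imgs = np.arange(2 * 4 * 6 * 3).reshape(2, 4, 6, 3)
flipped_h = np.stack([np.fliplr(img) for img in origin_imgs])
flipped_v = np.stack([np.flipud(img) for img in origin_imgs])

assert check_flip(origin_imgs, flipped_h, 'horizontal')
assert check_flip(origin_imgs, flipped_v, 'vertical')  # any non-horizontal type
assert not check_flip(origin_imgs, flipped_v, 'horizontal')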
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
    """Calculate the ground truth confusion matrix."""
    max_index = max(max(gt_labels), max(pred_labels))
    confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
    for gt, pred in zip(gt_labels, pred_labels):
        confusion_mat[gt][pred] += 1
    # Drop classes that occur in neither gt nor pred. The last index can be
    # skipped: it is the maximum of both label sets, so it always occurs.
    del_index = []
    for i in range(max_index):
        if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
            del_index.append(i)
    confusion_mat = np.delete(confusion_mat, del_index, axis=0)
    confusion_mat = np.delete(confusion_mat, del_index, axis=1)

    if normalize is not None:
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
        m, n = confusion_mat.shape
        if normalize == 'true':
            for i in range(m):
                s = np.sum(confusion_mat[i], dtype=float)
                if s == 0:
                    continue
                confusion_mat[i, :] = confusion_mat[i, :] / s
        elif normalize == 'pred':
            for i in range(n):
                s = np.sum(confusion_mat[:, i])
                if s == 0:
                    continue
                confusion_mat[:, i] = confusion_mat[:, i] / s
        elif normalize == 'all':
            s = np.sum(confusion_mat)
            if s != 0:
                confusion_mat /= s

    return confusion_mat
Calculate the ground truth confusion matrix.
gt_confusion_matrix
python
open-mmlab/mmaction2
tests/evaluation/metrics/test_metric_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_metric_utils.py
Apache-2.0
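A small worked example for gt_confusion_matrix above (toy labels, illustrative only). Rows index ground-truth classes and columns index predictions:

gt_labels = [0, 0, 1, 2, 2, 0]
pred_labels = [0, 0, 1, 2, 1, 0]

mat = gt_confusion_matrix(gt_labels, pred_labels)
# array([[3, 0, 0],
#        [0, 1, 0],
#        [0, 1, 1]])

mat_true = gt_confusion_matrix(gt_labels, pred_labels, normalize='true')
# each row is divided by its sum, so row 2 becomes [0., 0.5, 0.5]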
def test_evaluate(self):
    """Test using the metric in the same way as Evaluator."""
    pred = [
        ActionDataSample().set_pred_score(i).set_gt_label(k).to_dict()
        for i, k in zip([
            torch.tensor([0.7, 0.0, 0.3]),
            torch.tensor([0.5, 0.2, 0.3]),
            torch.tensor([0.4, 0.5, 0.1]),
            torch.tensor([0.0, 0.0, 1.0]),
            torch.tensor([0.0, 0.0, 1.0]),
            torch.tensor([0.0, 0.0, 1.0]),
        ], [[0], [0], [1], [2], [2], [0]])
    ]

    # Test with score (use score instead of label if score exists)
    metric = METRICS.build(dict(type='RetrievalRecall', topk=1))
    metric.process(None, pred)
    recall = metric.evaluate(6)
    self.assertIsInstance(recall, dict)
    self.assertAlmostEqual(
        recall['retrieval/Recall@1'], 5 / 6 * 100, places=4)

    # Test with an invalid topk
    with self.assertRaisesRegex(RuntimeError, 'selected index k'):
        metric = METRICS.build(dict(type='RetrievalRecall', topk=10))
        metric.process(None, pred)
        metric.evaluate(6)

    with self.assertRaisesRegex(ValueError, '`topk` must be a'):
        METRICS.build(dict(type='RetrievalRecall', topk=-1))

    # Test initialization with an integer topk
    metric = METRICS.build(dict(type='RetrievalRecall', topk=5))
    self.assertEqual(metric.topk, (5, ))

    # Test initialization with a tuple topk
    metric = METRICS.build(dict(type='RetrievalRecall', topk=(1, 2, 5)))
    self.assertEqual(metric.topk, (1, 2, 5))
Test using the metric in the same way as Evaluator.
test_evaluate
python
open-mmlab/mmaction2
tests/evaluation/metrics/test_retrieval_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_retrieval_metric.py
Apache-2.0
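The expected value 5 / 6 * 100 in the test above can be checked by hand: per sample, Recall@1 is a hit iff the arg-max class is among the ground-truth labels, and only the last sample (top class 2, ground truth 0) misses. A short independent sketch of that computation (not the RetrievalRecall implementation itself):

import torch

scores = [
    torch.tensor([0.7, 0.0, 0.3]),
    torch.tensor([0.5, 0.2, 0.3]),
    torch.tensor([0.4, 0.5, 0.1]),
    torch.tensor([0.0, 0.0, 1.0]),
    torch.tensor([0.0, 0.0, 1.0]),
    torch.tensor([0.0, 0.0, 1.0]),
]
gts = [[0], [0], [1], [2], [2], [0]]
hits = sum(int(s.argmax().item() in gt) for s, gt in zip(scores, gts))
print(hits / len(gts) * 100)  # 83.333... == 5 / 6 * 100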
def test_calculate(self):
    """Test using the metric from static method."""

    # seq of indices format
    y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
    y_pred = [np.arange(10)] * 2

    # test with the indices format and topk=1
    recall_score = RetrievalRecall.calculate(
        y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
    expect_recall = 50.
    self.assertEqual(recall_score[0].item(), expect_recall)

    # test with tensor input
    y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
                           [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]])
    y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
    recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=1)
    expect_recall = 50.
    self.assertEqual(recall_score[0].item(), expect_recall)

    # test with topk=2
    y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
    recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=2)
    expect_recall = 100.
    self.assertEqual(recall_score[0].item(), expect_recall)

    # test with topk=(1, 5)
    y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
    recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=(1, 5))
    expect_recalls = [50., 100.]
    self.assertEqual(len(recall_score), len(expect_recalls))
    for i in range(len(expect_recalls)):
        self.assertEqual(recall_score[i].item(), expect_recalls[i])

    # Test with an invalid pred
    y_pred = dict()
    y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
    with self.assertRaisesRegex(AssertionError, '`pred` must be Seq'):
        RetrievalRecall.calculate(y_pred, y_true, True, True)

    # Test with an invalid target
    y_true = dict()
    y_pred = [np.arange(10)] * 2
    with self.assertRaisesRegex(AssertionError, '`target` must be Seq'):
        RetrievalRecall.calculate(
            y_pred, y_true, topk=1, pred_indices=True, target_indices=True)

    # Test with mismatched lengths of `pred` and `target`
    y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
    y_pred = [np.arange(10)] * 3
    with self.assertRaisesRegex(AssertionError, 'Length of `pred`'):
        RetrievalRecall.calculate(
            y_pred, y_true, topk=1, pred_indices=True, target_indices=True)

    # Test with an invalid target element
    y_true = [[0, 2, 5, 8, 9], dict()]
    y_pred = [np.arange(10)] * 2
    with self.assertRaisesRegex(AssertionError, '`target` should be'):
        RetrievalRecall.calculate(
            y_pred, y_true, topk=1, pred_indices=True, target_indices=True)

    # Test with an invalid pred element
    y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
    y_pred = [np.arange(10), dict()]
    with self.assertRaisesRegex(AssertionError, '`pred` should be'):
        RetrievalRecall.calculate(
            y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
Test using the metric from static method.
test_calculate
python
open-mmlab/mmaction2
tests/evaluation/metrics/test_retrieval_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_retrieval_metric.py
Apache-2.0
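A hand check of the first expected value (50.) above: with pred_indices=True and target_indices=True, each prediction is a ranked index list, and Recall@1 hits iff its first entry appears in the target set. Sample one hits (index 0 is relevant), sample two misses. A short independent sketch:

y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
y_pred = [list(range(10))] * 2  # ranked indices, best match first
hits = sum(int(pred[0] in true) for pred, true in zip(y_pred, y_true))
print(hits / len(y_true) * 100)  # 50.0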
def test_evaluate(self):
    """Test using the metric in the same way as Evaluator."""
    pred = [
        ActionDataSample().set_pred_score(i).set_pred_label(
            j).set_gt_label(k).to_dict() for i, j, k in zip([
                torch.tensor([0.7, 0.0, 0.3]),
                torch.tensor([0.5, 0.2, 0.3]),
                torch.tensor([0.4, 0.5, 0.1]),
                torch.tensor([0.0, 0.0, 1.0]),
                torch.tensor([0.0, 0.0, 1.0]),
                torch.tensor([0.0, 0.0, 1.0]),
            ], [0, 0, 1, 2, 2, 2], [0, 0, 1, 2, 1, 0])
    ]

    # Test with score (use score instead of label if score exists)
    metric = METRICS.build(dict(type='ConfusionMatrix'))
    metric.process(None, pred)
    res = metric.evaluate(6)
    self.assertIsInstance(res, dict)
    self.assertTensorEqual(
        res['confusion_matrix/result'],
        torch.tensor([
            [2, 0, 1],
            [0, 1, 1],
            [0, 0, 1],
        ]))

    # Test with label
    for sample in pred:
        del sample['pred_score']
    metric = METRICS.build(dict(type='ConfusionMatrix'))
    metric.process(None, pred)
    with self.assertRaisesRegex(AssertionError,
                                'Please specify the `num_classes`'):
        metric.evaluate(6)

    metric = METRICS.build(dict(type='ConfusionMatrix', num_classes=3))
    metric.process(None, pred)
    res = metric.evaluate(6)  # re-evaluate with the label-based metric
    self.assertIsInstance(res, dict)
    self.assertTensorEqual(
        res['confusion_matrix/result'],
        torch.tensor([
            [2, 0, 1],
            [0, 1, 1],
            [0, 0, 1],
        ]))
Test using the metric in the same way as Evaluator.
test_evaluate
python
open-mmlab/mmaction2
tests/evaluation/metrics/test_acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_acc_metric.py
Apache-2.0
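The expected tensor above can be reproduced by a direct count (rows = ground-truth labels, columns = predicted labels). A short independent sketch:

import torch

gt = [0, 0, 1, 2, 1, 0]
pred = [0, 0, 1, 2, 2, 2]
mat = torch.zeros(3, 3, dtype=torch.long)
for g, p in zip(gt, pred):
    mat[g, p] += 1
print(mat)  # [[2, 0, 1], [0, 1, 1], [0, 0, 1]]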
def parse_version_info(version_str: str): """Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int or str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). """ version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info)
Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int or str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
parse_version_info
python
open-mmlab/mmaction2
mmaction/version.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/version.py
Apache-2.0
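Usage of parse_version_info follows directly from its docstring; a quick sketch:

assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')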
def extract_feat(self,
                 inputs: torch.Tensor,
                 stage: str = 'backbone',
                 **kwargs) -> Tuple:
    """Extract features at the given stage.

    Args:
        inputs (torch.Tensor): The input skeleton with shape of
            `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`.
        stage (str): The stage to output the features.
            Defaults to ``'backbone'``.

    Returns:
        tuple: The extracted features and a dict recording the kwargs
        for downstream pipeline, which is an empty dict for the
        GCN-based recognizer.
    """

    # Record the kwargs required by `loss` and `predict`
    loss_predict_kwargs = dict()

    bs, nc = inputs.shape[:2]
    inputs = inputs.reshape((bs * nc, ) + inputs.shape[2:])

    x = self.backbone(inputs)

    if stage == 'backbone':
        return x, loss_predict_kwargs

    if self.with_cls_head and stage == 'head':
        x = self.cls_head(x, **loss_predict_kwargs)
        return x, loss_predict_kwargs
Extract features at the given stage. Args: inputs (torch.Tensor): The input skeleton with shape of `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`. stage (str): The stage to output the features. Defaults to ``'backbone'``. Returns: tuple: The extracted features and a dict recording the kwargs for downstream pipeline, which is an empty dict for the GCN-based recognizer.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_gcn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_gcn.py
Apache-2.0
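The core of this method is folding num_clips into the batch dimension before the backbone call. A minimal shape sketch (dummy tensor, no real backbone; the concrete sizes are illustrative):

import torch

# (B, num_clips, num_person, clip_len, num_joints, C)
inputs = torch.randn(2, 4, 2, 100, 17, 3)
bs, nc = inputs.shape[:2]
folded = inputs.reshape((bs * nc, ) + inputs.shape[2:])
print(folded.shape)  # torch.Size([8, 2, 100, 17, 3])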
def forward(self, *data_samples, mode: str, **kwargs) -> ForwardResults:
    """The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return tensor or tuple of
        tensor without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
        processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
        inputs and data samples.

    Note that this method doesn't handle back-propagation or optimizer
    updating; those are done in :meth:`train_step`.

    Args:
        data_samples: should be a sequence of ``SampleList`` if
            ``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is
            the annotation data of one data source.
            It should be a single torch tensor if ``mode="tensor"``.
        mode (str): Return what kind of value.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensor.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensor.
    """
    if mode == 'loss':
        return self.loss(data_samples)
    elif mode == 'predict':
        return self.predict(data_samples)
    elif mode == 'tensor':
        assert isinstance(data_samples, torch.Tensor)

        data_ndim = data_samples.ndim
        if data_ndim not in [4, 5]:
            info = f'Input is a {data_ndim}D tensor. '
            info += 'Only 4D (BCHW) or 5D (BCTHW) tensors are supported!'
            raise ValueError(info)

        return self._forward(data_samples, **kwargs)
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle back-propagation or optimizer updating; those are done in :meth:`train_step`. Args: data_samples: should be a sequence of ``SampleList`` if ``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is the annotation data of one data source. It should be a single torch tensor if ``mode="tensor"``. mode (str): Return what kind of value. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
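For mode='tensor' the rank check accepts exactly 4-D image batches and 5-D video batches. A shape-only sketch (the recognizer instance and sizes below are hypothetical):

import torch

image_batch = torch.randn(8, 3, 224, 224)      # 4-D: (B, C, H, W)
video_batch = torch.randn(8, 3, 16, 224, 224)  # 5-D: (B, C, T, H, W)
# recognizer(image_batch, mode='tensor')  # accepted
# recognizer(video_batch, mode='tensor')  # accepted
# any other rank raises ValueError before reaching _forward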
def loss(self, data_samples: Sequence[SampleList]) -> dict:
    """Calculate losses from a batch of inputs and data samples.

    Args:
        data_samples (Sequence[SampleList]): a sequence of SampleList. Each
            SampleList contains data samples from the same data source.

    Returns:
        dict: A dictionary of loss components.
    """
    loss_dict = {}
    for idx, data in enumerate(data_samples):
        # Unpack into fresh names instead of rebinding `data_samples`,
        # which would shadow the argument being iterated over.
        inputs, samples = data['inputs'], data['data_samples']
        feats = self.extract_feat(inputs)
        loss_cls = self.cls_head.loss(feats, samples)
        for key in loss_cls:
            loss_dict[key + f'_{idx}'] = loss_cls[key]
    return loss_dict
Calculate losses from a batch of inputs and data samples. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
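Because each entry of data_samples comes from a different data source, the per-source losses are disambiguated with an index suffix. A toy sketch of just that merging step (dummy loss dicts, no real model):

loss_dicts = [{'loss_cls': 0.7}, {'loss_cls': 1.1}]  # one dict per source
loss_dict = {}
for idx, loss_cls in enumerate(loss_dicts):
    for key, value in loss_cls.items():
        loss_dict[f'{key}_{idx}'] = value
print(loss_dict)  # {'loss_cls_0': 0.7, 'loss_cls_1': 1.1}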
def predict(self, data_samples: Sequence[SampleList]) -> SampleList:
    """Predict results from a batch of inputs and data samples with
    post-processing.

    Args:
        data_samples (Sequence[SampleList]): a sequence of SampleList. Each
            SampleList contains data samples from the same data source.

    Returns:
        List[``ActionDataSample``]: Return the recognition results.
        The return value is ``ActionDataSample``, which usually contains
        ``pred_scores``. And the ``pred_scores`` usually contains the
        following keys.

        - item (torch.Tensor): Classification scores, has a shape
            (num_classes, )
    """
    assert len(data_samples) == 1
    feats = self.extract_feat(data_samples[0]['inputs'], test_mode=True)
    predictions = self.cls_head.predict(feats,
                                        data_samples[0]['data_samples'])
    return predictions
Predict results from a batch of inputs and data samples with post-processing. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: List[``ActionDataSample``]: Return the recognition results. The return value is ``ActionDataSample``, which usually contains ``pred_scores``. And the ``pred_scores`` usually contains the following keys. - item (torch.Tensor): Classification scores, has a shape (num_classes, )
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _forward(self,
             inputs: torch.Tensor,
             stage: str = 'backbone',
             **kwargs) -> ForwardResults:
    """Network forward process. Usually includes backbone, neck and head
    forward without any post-processing.

    Args:
        inputs (torch.Tensor): Raw Inputs of the recognizer.
        stage (str): Which stage to output the features.

    Returns:
        Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head``
        forward.
    """
    # `extract_feat` of this recognizer returns the features directly
    # rather than a `(feats, kwargs)` tuple, so no unpacking is needed.
    feats = self.extract_feat(inputs, stage=stage)
    return feats
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head`` forward.
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _run_forward(self, data: Union[dict, tuple, list],
                 mode: str) -> Union[Dict[str, torch.Tensor], list]:
    """Unpacks data for :meth:`forward`.

    Args:
        data (dict or tuple or list): Data sampled from dataset.
        mode (str): Mode of forward.

    Returns:
        dict or list: Results of training or testing mode.
    """
    if isinstance(data, dict):
        data = [data]
        results = self(*data, mode=mode)
    elif isinstance(data, (list, tuple)):
        results = self(*data, mode=mode)
    else:
        raise TypeError(f'Unsupported data type: {type(data)}!')
    return results
Unpacks data for :meth:`forward`. Args: data (dict or tuple or list): Data sampled from dataset. mode (str): Mode of forward. Returns: dict or list: Results of training or testing mode.
_run_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def extract_feat(self,
                 inputs: torch.Tensor,
                 stage: str = 'backbone',
                 test_mode: bool = False) -> torch.Tensor:
    """Extract features of different stages.

    Args:
        inputs (torch.Tensor): The input data.
        stage (str): Which stage to output the feature.
            Defaults to ``'backbone'``.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        torch.Tensor: The extracted features.
    """
    # Fold the clip dimension into the batch dimension if present.
    if len(inputs.shape) == 6:
        inputs = inputs.view((-1, ) + inputs.shape[2:])

    x = self.backbone(inputs)
    # In test mode, or when only backbone features are requested, return
    # the backbone output directly; otherwise run the classification head.
    if test_mode or stage == 'backbone':
        return x

    x = self.cls_head(x)
    return x
Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.Tensor: The extracted features.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def extract_feat(self, batch_inputs: Tensor, stage: str = 'backbone', **kwargs) -> tuple: """Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``backbone``. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. This will be an empty dict in audio recognizer. """ # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() batch_inputs = batch_inputs.view((-1, ) + batch_inputs.shape[2:]) x = self.backbone(batch_inputs) if stage == 'backbone': return x, loss_predict_kwargs if self.with_cls_head and stage == 'head': x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``backbone``. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. This will be an empty dict in audio recognizer.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_audio.py
Apache-2.0
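As in the other recognizers, the clip dimension is folded into the batch before the backbone. A shape sketch assuming a spectrogram-like input of shape (B, num_clips, C, T, F); the sizes are illustrative:

import torch

batch_inputs = torch.randn(2, 5, 1, 128, 80)
flat = batch_inputs.view((-1, ) + batch_inputs.shape[2:])
print(flat.shape)  # torch.Size([10, 1, 128, 80])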
def extract_feat(self,
                 inputs: torch.Tensor,
                 stage: str = 'neck',
                 data_samples: SampleList = None,
                 test_mode: bool = False) -> tuple:
    """Extract features of different stages.

    Args:
        inputs (Tensor): The input data.
        stage (str): Which stage to output the feature.
            Defaults to ``neck``.
        data_samples (List[:obj:`ActionDataSample`]): Action data
            samples, which are only needed in training. Defaults to None.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        Tensor: The extracted features.
        dict: A dict recording the kwargs for downstream
            pipeline. These keys are usually included:
            ``num_segs``, ``fcn_test``, ``loss_aux``.
    """
    # Record the kwargs required by `loss` and `predict`.
    loss_predict_kwargs = dict()

    num_segs = inputs.shape[1]
    loss_predict_kwargs['num_segs'] = num_segs

    # [N, num_crops * num_segs, C, H, W] ->
    # [N * num_crops * num_segs, C, H, W]
    # `num_crops` is calculated by:
    #   1) `twice_sample` in `SampleFrames`
    #   2) `num_sample_positions` in `DenseSampleFrames`
    #   3) `ThreeCrop/TenCrop` in `test_pipeline`
    #   4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
    inputs = inputs.view((-1, ) + inputs.shape[2:])

    def forward_once(batch_imgs):
        # Extract features through backbone.
        if (hasattr(self.backbone, 'features')
                and self.backbone_from == 'torchvision'):
            x = self.backbone.features(batch_imgs)
        elif self.backbone_from == 'timm':
            x = self.backbone.forward_features(batch_imgs)
        elif self.backbone_from in ['mmcls', 'mmpretrain']:
            x = self.backbone(batch_imgs)
            if isinstance(x, tuple):
                assert len(x) == 1
                x = x[0]
        else:
            x = self.backbone(batch_imgs)

        if self.backbone_from in ['torchvision', 'timm']:
            if not self.feature_shape:
                # Transformer-based feature shape: B x L x C.
                if len(x.shape) == 3:
                    self.feature_shape = 'NLC'
                # Resnet-based feature shape: B x C x Hs x Ws.
                elif len(x.shape) == 4:
                    self.feature_shape = 'NCHW'

            if self.feature_shape == 'NHWC':
                x = nn.AdaptiveAvgPool2d(1)(x.permute(0, 3, 1, 2))  # B x C x 1 x 1
            elif self.feature_shape == 'NCHW':
                x = nn.AdaptiveAvgPool2d(1)(x)  # B x C x 1 x 1
            elif self.feature_shape == 'NLC':
                x = nn.AdaptiveAvgPool1d(1)(x.transpose(1, 2))  # B x C x 1

            x = x.reshape((x.shape[0], -1))  # B x C
            x = x.reshape(x.shape + (1, 1))  # B x C x 1 x 1

        return x

    # Check settings of `fcn_test`.
    fcn_test = False
    if test_mode:
        if self.test_cfg is not None and self.test_cfg.get('fcn_test', False):
            fcn_test = True
            num_segs = self.test_cfg.get('num_segs',
                                         self.backbone.num_segments)
        loss_predict_kwargs['fcn_test'] = fcn_test

        # inference with batch size of `max_testing_views` if set
        if self.test_cfg is not None and self.test_cfg.get(
                'max_testing_views', False):
            max_testing_views = self.test_cfg.get('max_testing_views')
            assert isinstance(max_testing_views, int)

            # `num_segments` may be specified on the backbone; fall back
            # to None if the attribute is absent.
            num_segments = getattr(self.backbone, 'num_segments', None)
            if num_segments is not None:
                assert max_testing_views % num_segments == 0, \
                    ('make sure that max_testing_views is a multiple of '
                     f'num_segments, but got {max_testing_views} and '
                     f'{num_segments}')

            total_views = inputs.shape[0]
            view_ptr = 0
            feats = []
            while view_ptr < total_views:
                batch_imgs = inputs[view_ptr:view_ptr + max_testing_views]
                feat = forward_once(batch_imgs)
                if self.with_neck:
                    feat, _ = self.neck(feat)
                feats.append(feat)
                view_ptr += max_testing_views

            def recursively_cat(feats):
                # recursively traverse feats until it's a tensor,
                # then concat
                out_feats = []
                for e_idx, elem in enumerate(feats[0]):
                    batch_elem = [feat[e_idx] for feat in feats]
                    if not isinstance(elem, torch.Tensor):
                        batch_elem = recursively_cat(batch_elem)
                    else:
                        batch_elem = torch.cat(batch_elem)
                    out_feats.append(batch_elem)
                return tuple(out_feats)

            if isinstance(feats[0], tuple):
                x = recursively_cat(feats)
            else:
                x = torch.cat(feats)
        else:
            x = forward_once(inputs)
    else:
        x = forward_once(inputs)

    # Return features extracted through backbone.
    if stage == 'backbone':
        return x, loss_predict_kwargs

    loss_aux = dict()
    if self.with_neck:
        # x is a tuple with multiple feature maps.
        x = [
            each.reshape((-1, num_segs) +
                         each.shape[1:]).transpose(1, 2).contiguous()
            for each in x
        ]
        x, loss_aux = self.neck(x, data_samples=data_samples)
        if not fcn_test:
            x = x.squeeze(2)
            loss_predict_kwargs['num_segs'] = 1
    elif fcn_test:
        # full convolution (fcn) testing when no neck
        # [N * num_crops * num_segs, C', H', W'] ->
        # [N * num_crops, C', num_segs, H', W']
        x = x.reshape((-1, num_segs) +
                      x.shape[1:]).transpose(1, 2).contiguous()
    loss_predict_kwargs['loss_aux'] = loss_aux

    # Return features extracted through neck.
    if stage == 'neck':
        return x, loss_predict_kwargs

    # Return raw logits through head.
    if self.with_cls_head and stage == 'head':
        # [N * num_crops, num_classes]
        x = self.cls_head(x, **loss_predict_kwargs)
        return x, loss_predict_kwargs
Extract features of different stages. Args: inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``neck``. data_samples (List[:obj:`ActionDataSample`]): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``num_segs``, ``fcn_test``, ``loss_aux``.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer2d.py
Apache-2.0
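The max_testing_views path above amounts to running the backbone over view-chunks and concatenating along the batch axis. A self-contained sketch with a stand-in forward (toy shapes, not the real backbone):

import torch

def forward_once(batch):           # stand-in for the real backbone pass
    return batch.mean(dim=(2, 3))  # (n, C, H, W) -> (n, C)

inputs = torch.randn(30, 3, 8, 8)  # 30 views after folding crops/segments
max_testing_views = 8
feats, view_ptr = [], 0
while view_ptr < inputs.shape[0]:
    feats.append(forward_once(inputs[view_ptr:view_ptr + max_testing_views]))
    view_ptr += max_testing_views
x = torch.cat(feats)               # equivalent to one full-batch pass
assert torch.allclose(x, forward_once(inputs))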