Dataset columns:

  identifier                      string    length 1 to 155
  parameters                      string    length 2 to 6.09k
  docstring                       string    length 11 to 63.4k
  docstring_summary               string    length 0 to 63.4k
  function                        string    length 29 to 99.8k
  function_tokens                 sequence
  start_point                     sequence
  end_point                       sequence
  language                        string    1 distinct value
  docstring_language              string    length 2 to 7
  docstring_language_predictions  string    length 18 to 23
  is_langid_reliable              string    2 distinct values
WeekMixin.get_previous_week
(self, date)
Get the previous valid week.
Get the previous valid week.
def get_previous_week(self, date):
    """Get the previous valid week."""
    return _get_next_prev(self, date, is_previous=True, period='week')
[ "def", "get_previous_week", "(", "self", ",", "date", ")", ":", "return", "_get_next_prev", "(", "self", ",", "date", ",", "is_previous", "=", "True", ",", "period", "=", "'week'", ")" ]
[ 194, 4 ]
[ 196, 74 ]
python
en
['en', 'af', 'en']
True
WeekMixin._get_next_week
(self, date)
Return the start date of the next interval. The interval is defined by start date <= item date < next start date.
Return the start date of the next interval.
def _get_next_week(self, date):
    """
    Return the start date of the next interval. The interval is defined
    by start date <= item date < next start date.
    """
    try:
        return date + datetime.timedelta(days=7 - self._get_weekday(date))
    except OverflowError:
        raise Http404(_("Date out of range"))
[ "def", "_get_next_week", "(", "self", ",", "date", ")", ":", "try", ":", "return", "date", "+", "datetime", ".", "timedelta", "(", "days", "=", "7", "-", "self", ".", "_get_weekday", "(", "date", ")", ")", "except", "OverflowError", ":", "raise", "Http404", "(", "_", "(", "\"Date out of range\"", ")", ")" ]
[ 198, 4 ]
[ 207, 49 ]
python
en
['en', 'error', 'th']
False
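A stdlib-only sanity check of the arithmetic above (the date is an arbitrary example, not from the source): with Monday-based weeks, adding 7 - weekday days always lands on the start of the following week.

import datetime

date = datetime.date(2021, 1, 3)  # a Sunday, so date.weekday() == 6
# _get_next_week adds (7 - weekday) days to reach the next week's start.
print(date + datetime.timedelta(days=7 - date.weekday()))  # 2021-01-04, a Monday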
WeekMixin._get_current_week
(self, date)
Return the start date of the current interval.
Return the start date of the current interval.
def _get_current_week(self, date):
    """Return the start date of the current interval."""
    return date - datetime.timedelta(self._get_weekday(date))
[ "def", "_get_current_week", "(", "self", ",", "date", ")", ":", "return", "date", "-", "datetime", ".", "timedelta", "(", "self", ".", "_get_weekday", "(", "date", ")", ")" ]
[ 209, 4 ]
[ 211, 65 ]
python
en
['en', 'en', 'en']
True
WeekMixin._get_weekday
(self, date)
Return the weekday for a given date. The first day according to the week format is 0 and the last day is 6.
Return the weekday for a given date.
def _get_weekday(self, date):
    """
    Return the weekday for a given date. The first day according to the
    week format is 0 and the last day is 6.
    """
    week_format = self.get_week_format()
    if week_format in {'%W', '%V'}:      # week starts on Monday
        return date.weekday()
    elif week_format == '%U':            # week starts on Sunday
        return (date.weekday() + 1) % 7
    else:
        raise ValueError("unknown week format: %s" % week_format)
[ "def", "_get_weekday", "(", "self", ",", "date", ")", ":", "week_format", "=", "self", ".", "get_week_format", "(", ")", "if", "week_format", "in", "{", "'%W'", ",", "'%V'", "}", ":", "# week starts on Monday", "return", "date", ".", "weekday", "(", ")", "elif", "week_format", "==", "'%U'", ":", "# week starts on Sunday", "return", "(", "date", ".", "weekday", "(", ")", "+", "1", ")", "%", "7", "else", ":", "raise", "ValueError", "(", "\"unknown week format: %s\"", "%", "week_format", ")" ]
[ 213, 4 ]
[ 225, 69 ]
python
en
['en', 'error', 'th']
False
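The two branches differ only in where the week starts; a minimal stdlib illustration (the sample date is an assumption):

import datetime

d = datetime.date(2021, 1, 3)   # a Sunday
print(d.weekday())              # 6 under '%W'/'%V' (Monday-based weeks)
print((d.weekday() + 1) % 7)    # 0 under '%U' (Sunday-based weeks)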
DateMixin.get_date_field
(self)
Get the name of the date field to be used to filter by.
Get the name of the date field to be used to filter by.
def get_date_field(self):
    """Get the name of the date field to be used to filter by."""
    if self.date_field is None:
        raise ImproperlyConfigured("%s.date_field is required."
                                   % self.__class__.__name__)
    return self.date_field
[ "def", "get_date_field", "(", "self", ")", ":", "if", "self", ".", "date_field", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "\"%s.date_field is required.\"", "%", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "date_field" ]
[ 233, 4 ]
[ 237, 30 ]
python
en
['en', 'en', 'en']
True
DateMixin.get_allow_future
(self)
Return `True` if the view should be allowed to display objects from the future.
Return `True` if the view should be allowed to display objects from the future.
def get_allow_future(self):
    """
    Return `True` if the view should be allowed to display objects from
    the future.
    """
    return self.allow_future
[ "def", "get_allow_future", "(", "self", ")", ":", "return", "self", ".", "allow_future" ]
[ 239, 4 ]
[ 244, 32 ]
python
en
['en', 'error', 'th']
False
DateMixin.uses_datetime_field
(self)
Return `True` if the date field is a `DateTimeField` and `False` if it's a `DateField`.
Return `True` if the date field is a `DateTimeField` and `False` if it's a `DateField`.
def uses_datetime_field(self):
    """
    Return `True` if the date field is a `DateTimeField` and `False`
    if it's a `DateField`.
    """
    model = self.get_queryset().model if self.model is None else self.model
    field = model._meta.get_field(self.get_date_field())
    return isinstance(field, models.DateTimeField)
[ "def", "uses_datetime_field", "(", "self", ")", ":", "model", "=", "self", ".", "get_queryset", "(", ")", ".", "model", "if", "self", ".", "model", "is", "None", "else", "self", ".", "model", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "self", ".", "get_date_field", "(", ")", ")", "return", "isinstance", "(", "field", ",", "models", ".", "DateTimeField", ")" ]
[ 250, 4 ]
[ 257, 54 ]
python
en
['en', 'error', 'th']
False
DateMixin._make_date_lookup_arg
(self, value)
Convert a date into a datetime when the date field is a DateTimeField. When time zone support is enabled, `date` is assumed to be in the current time zone, so that displayed items are consistent with the URL.
Convert a date into a datetime when the date field is a DateTimeField.
def _make_date_lookup_arg(self, value):
    """
    Convert a date into a datetime when the date field is a DateTimeField.

    When time zone support is enabled, `date` is assumed to be in the
    current time zone, so that displayed items are consistent with the URL.
    """
    if self.uses_datetime_field:
        value = datetime.datetime.combine(value, datetime.time.min)
        if settings.USE_TZ:
            value = timezone.make_aware(value)
    return value
[ "def", "_make_date_lookup_arg", "(", "self", ",", "value", ")", ":", "if", "self", ".", "uses_datetime_field", ":", "value", "=", "datetime", ".", "datetime", ".", "combine", "(", "value", ",", "datetime", ".", "time", ".", "min", ")", "if", "settings", ".", "USE_TZ", ":", "value", "=", "timezone", ".", "make_aware", "(", "value", ")", "return", "value" ]
[ 259, 4 ]
[ 270, 20 ]
python
en
['en', 'error', 'th']
False
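A hedged sketch of the conversion this performs for a DateTimeField, using only the stdlib; zoneinfo stands in here for Django's timezone.make_aware, and the zone name is an assumption:

import datetime
from zoneinfo import ZoneInfo  # Python 3.9+

value = datetime.date(2021, 6, 1)
value = datetime.datetime.combine(value, datetime.time.min)
value = value.replace(tzinfo=ZoneInfo('Europe/Paris'))  # assumed current time zone
print(value)  # 2021-06-01 00:00:00+02:00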
DateMixin._make_single_date_lookup
(self, date)
Get the lookup kwargs for filtering on a single date. If the date field is a DateTimeField, we can't just filter on date_field=date because that doesn't take the time into account.
Get the lookup kwargs for filtering on a single date.
def _make_single_date_lookup(self, date):
    """
    Get the lookup kwargs for filtering on a single date.

    If the date field is a DateTimeField, we can't just filter on
    date_field=date because that doesn't take the time into account.
    """
    date_field = self.get_date_field()
    if self.uses_datetime_field:
        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
        return {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }
    else:
        # Skip self._make_date_lookup_arg, it's a no-op in this branch.
        return {date_field: date}
[ "def", "_make_single_date_lookup", "(", "self", ",", "date", ")", ":", "date_field", "=", "self", ".", "get_date_field", "(", ")", "if", "self", ".", "uses_datetime_field", ":", "since", "=", "self", ".", "_make_date_lookup_arg", "(", "date", ")", "until", "=", "self", ".", "_make_date_lookup_arg", "(", "date", "+", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", ")", "return", "{", "'%s__gte'", "%", "date_field", ":", "since", ",", "'%s__lt'", "%", "date_field", ":", "until", ",", "}", "else", ":", "# Skip self._make_date_lookup_arg, it's a no-op in this branch.", "return", "{", "date_field", ":", "date", "}" ]
[ 272, 4 ]
[ 289, 37 ]
python
en
['en', 'error', 'th']
False
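For a DateTimeField this yields a half-open [midnight, next midnight) range; a sketch of the kwargs produced, with 'pub_date' as a hypothetical field name and time zones ignored:

import datetime

date = datetime.date(2021, 6, 1)
date_field = 'pub_date'  # hypothetical date field
since = datetime.datetime.combine(date, datetime.time.min)
until = datetime.datetime.combine(date + datetime.timedelta(days=1),
                                  datetime.time.min)
print({'%s__gte' % date_field: since, '%s__lt' % date_field: until})
# {'pub_date__gte': datetime.datetime(2021, 6, 1, 0, 0),
#  'pub_date__lt': datetime.datetime(2021, 6, 2, 0, 0)}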
BaseDateListView.get_dated_items
(self)
Obtain the list of dates and items.
Obtain the list of dates and items.
def get_dated_items(self):
    """Obtain the list of dates and items."""
    raise NotImplementedError('A DateView must provide an implementation '
                              'of get_dated_items()')
[ "def", "get_dated_items", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "'A DateView must provide an implementation of get_dated_items()'", ")" ]
[ 306, 4 ]
[ 308, 99 ]
python
en
['en', 'en', 'en']
True
BaseDateListView.get_ordering
(self)
Return the field or fields to use for ordering the queryset; use the date field by default.
Return the field or fields to use for ordering the queryset; use the date field by default.
def get_ordering(self):
    """
    Return the field or fields to use for ordering the queryset; use the
    date field by default.
    """
    return '-%s' % self.get_date_field() if self.ordering is None else self.ordering
[ "def", "get_ordering", "(", "self", ")", ":", "return", "'-%s'", "%", "self", ".", "get_date_field", "(", ")", "if", "self", ".", "ordering", "is", "None", "else", "self", ".", "ordering" ]
[ 310, 4 ]
[ 315, 88 ]
python
en
['en', 'error', 'th']
False
BaseDateListView.get_dated_queryset
(self, **lookup)
Get a queryset properly filtered according to `allow_future` and any extra lookup kwargs.
Get a queryset properly filtered according to `allow_future` and any extra lookup kwargs.
def get_dated_queryset(self, **lookup):
    """
    Get a queryset properly filtered according to `allow_future` and any
    extra lookup kwargs.
    """
    qs = self.get_queryset().filter(**lookup)
    date_field = self.get_date_field()
    allow_future = self.get_allow_future()
    allow_empty = self.get_allow_empty()
    paginate_by = self.get_paginate_by(qs)

    if not allow_future:
        now = timezone.now() if self.uses_datetime_field else timezone_today()
        qs = qs.filter(**{'%s__lte' % date_field: now})

    if not allow_empty:
        # When pagination is enabled, it's better to do a cheap query
        # than to load the unpaginated queryset in memory.
        is_empty = not qs if paginate_by is None else not qs.exists()
        if is_empty:
            raise Http404(_("No %(verbose_name_plural)s available") % {
                'verbose_name_plural': qs.model._meta.verbose_name_plural,
            })

    return qs
[ "def", "get_dated_queryset", "(", "self", ",", "*", "*", "lookup", ")", ":", "qs", "=", "self", ".", "get_queryset", "(", ")", ".", "filter", "(", "*", "*", "lookup", ")", "date_field", "=", "self", ".", "get_date_field", "(", ")", "allow_future", "=", "self", ".", "get_allow_future", "(", ")", "allow_empty", "=", "self", ".", "get_allow_empty", "(", ")", "paginate_by", "=", "self", ".", "get_paginate_by", "(", "qs", ")", "if", "not", "allow_future", ":", "now", "=", "timezone", ".", "now", "(", ")", "if", "self", ".", "uses_datetime_field", "else", "timezone_today", "(", ")", "qs", "=", "qs", ".", "filter", "(", "*", "*", "{", "'%s__lte'", "%", "date_field", ":", "now", "}", ")", "if", "not", "allow_empty", ":", "# When pagination is enabled, it's better to do a cheap query", "# than to load the unpaginated queryset in memory.", "is_empty", "=", "not", "qs", "if", "paginate_by", "is", "None", "else", "not", "qs", ".", "exists", "(", ")", "if", "is_empty", ":", "raise", "Http404", "(", "_", "(", "\"No %(verbose_name_plural)s available\"", ")", "%", "{", "'verbose_name_plural'", ":", "qs", ".", "model", ".", "_meta", ".", "verbose_name_plural", ",", "}", ")", "return", "qs" ]
[ 317, 4 ]
[ 341, 17 ]
python
en
['en', 'error', 'th']
False
BaseDateListView.get_date_list_period
(self)
Get the aggregation period for the list of dates: 'year', 'month', or 'day'.
Get the aggregation period for the list of dates: 'year', 'month', or 'day'.
def get_date_list_period(self):
    """
    Get the aggregation period for the list of dates: 'year', 'month',
    or 'day'.
    """
    return self.date_list_period
[ "def", "get_date_list_period", "(", "self", ")", ":", "return", "self", ".", "date_list_period" ]
[ 343, 4 ]
[ 348, 36 ]
python
en
['en', 'error', 'th']
False
BaseDateListView.get_date_list
(self, queryset, date_type=None, ordering='ASC')
Get a date list by calling `queryset.dates/datetimes()`, checking along the way for empty lists that aren't allowed.
Get a date list by calling `queryset.dates/datetimes()`, checking along the way for empty lists that aren't allowed.
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
    """
    Get a date list by calling `queryset.dates/datetimes()`, checking
    along the way for empty lists that aren't allowed.
    """
    date_field = self.get_date_field()
    allow_empty = self.get_allow_empty()
    if date_type is None:
        date_type = self.get_date_list_period()

    if self.uses_datetime_field:
        date_list = queryset.datetimes(date_field, date_type, ordering)
    else:
        date_list = queryset.dates(date_field, date_type, ordering)
    if date_list is not None and not date_list and not allow_empty:
        raise Http404(
            _("No %(verbose_name_plural)s available") % {
                'verbose_name_plural': queryset.model._meta.verbose_name_plural,
            }
        )

    return date_list
[ "def", "get_date_list", "(", "self", ",", "queryset", ",", "date_type", "=", "None", ",", "ordering", "=", "'ASC'", ")", ":", "date_field", "=", "self", ".", "get_date_field", "(", ")", "allow_empty", "=", "self", ".", "get_allow_empty", "(", ")", "if", "date_type", "is", "None", ":", "date_type", "=", "self", ".", "get_date_list_period", "(", ")", "if", "self", ".", "uses_datetime_field", ":", "date_list", "=", "queryset", ".", "datetimes", "(", "date_field", ",", "date_type", ",", "ordering", ")", "else", ":", "date_list", "=", "queryset", ".", "dates", "(", "date_field", ",", "date_type", ",", "ordering", ")", "if", "date_list", "is", "not", "None", "and", "not", "date_list", "and", "not", "allow_empty", ":", "raise", "Http404", "(", "_", "(", "\"No %(verbose_name_plural)s available\"", ")", "%", "{", "'verbose_name_plural'", ":", "queryset", ".", "model", ".", "_meta", ".", "verbose_name_plural", ",", "}", ")", "return", "date_list" ]
[ 350, 4 ]
[ 371, 24 ]
python
en
['en', 'error', 'th']
False
BaseArchiveIndexView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    qs = self.get_dated_queryset()
    date_list = self.get_date_list(qs, ordering='DESC')

    if not date_list:
        qs = qs.none()

    return (date_list, qs, {})
[ "def", "get_dated_items", "(", "self", ")", ":", "qs", "=", "self", ".", "get_dated_queryset", "(", ")", "date_list", "=", "self", ".", "get_date_list", "(", "qs", ",", "ordering", "=", "'DESC'", ")", "if", "not", "date_list", ":", "qs", "=", "qs", ".", "none", "(", ")", "return", "(", "date_list", ",", "qs", ",", "{", "}", ")" ]
[ 380, 4 ]
[ 388, 34 ]
python
en
['en', 'en', 'en']
True
BaseYearArchiveView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    year = self.get_year()

    date_field = self.get_date_field()
    date = _date_from_string(year, self.get_year_format())

    since = self._make_date_lookup_arg(date)
    until = self._make_date_lookup_arg(self._get_next_year(date))
    lookup_kwargs = {
        '%s__gte' % date_field: since,
        '%s__lt' % date_field: until,
    }

    qs = self.get_dated_queryset(**lookup_kwargs)
    date_list = self.get_date_list(qs)

    if not self.get_make_object_list():
        # We need this to be a queryset since parent classes introspect it
        # to find information about the model.
        qs = qs.none()

    return (date_list, qs, {
        'year': date,
        'next_year': self.get_next_year(date),
        'previous_year': self.get_previous_year(date),
    })
[ "def", "get_dated_items", "(", "self", ")", ":", "year", "=", "self", ".", "get_year", "(", ")", "date_field", "=", "self", ".", "get_date_field", "(", ")", "date", "=", "_date_from_string", "(", "year", ",", "self", ".", "get_year_format", "(", ")", ")", "since", "=", "self", ".", "_make_date_lookup_arg", "(", "date", ")", "until", "=", "self", ".", "_make_date_lookup_arg", "(", "self", ".", "_get_next_year", "(", "date", ")", ")", "lookup_kwargs", "=", "{", "'%s__gte'", "%", "date_field", ":", "since", ",", "'%s__lt'", "%", "date_field", ":", "until", ",", "}", "qs", "=", "self", ".", "get_dated_queryset", "(", "*", "*", "lookup_kwargs", ")", "date_list", "=", "self", ".", "get_date_list", "(", "qs", ")", "if", "not", "self", ".", "get_make_object_list", "(", ")", ":", "# We need this to be a queryset since parent classes introspect it", "# to find information about the model.", "qs", "=", "qs", ".", "none", "(", ")", "return", "(", "date_list", ",", "qs", ",", "{", "'year'", ":", "date", ",", "'next_year'", ":", "self", ".", "get_next_year", "(", "date", ")", ",", "'previous_year'", ":", "self", ".", "get_previous_year", "(", "date", ")", ",", "}", ")" ]
[ 401, 4 ]
[ 427, 10 ]
python
en
['en', 'en', 'en']
True
BaseYearArchiveView.get_make_object_list
(self)
Return `True` if this view should contain the full list of objects in the given year.
Return `True` if this view should contain the full list of objects in the given year.
def get_make_object_list(self):
    """
    Return `True` if this view should contain the full list of objects in
    the given year.
    """
    return self.make_object_list
[ "def", "get_make_object_list", "(", "self", ")", ":", "return", "self", ".", "make_object_list" ]
[ 429, 4 ]
[ 434, 36 ]
python
en
['en', 'error', 'th']
False
BaseMonthArchiveView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    year = self.get_year()
    month = self.get_month()

    date_field = self.get_date_field()
    date = _date_from_string(year, self.get_year_format(),
                             month, self.get_month_format())

    since = self._make_date_lookup_arg(date)
    until = self._make_date_lookup_arg(self._get_next_month(date))
    lookup_kwargs = {
        '%s__gte' % date_field: since,
        '%s__lt' % date_field: until,
    }

    qs = self.get_dated_queryset(**lookup_kwargs)
    date_list = self.get_date_list(qs)

    return (date_list, qs, {
        'month': date,
        'next_month': self.get_next_month(date),
        'previous_month': self.get_previous_month(date),
    })
[ "def", "get_dated_items", "(", "self", ")", ":", "year", "=", "self", ".", "get_year", "(", ")", "month", "=", "self", ".", "get_month", "(", ")", "date_field", "=", "self", ".", "get_date_field", "(", ")", "date", "=", "_date_from_string", "(", "year", ",", "self", ".", "get_year_format", "(", ")", ",", "month", ",", "self", ".", "get_month_format", "(", ")", ")", "since", "=", "self", ".", "_make_date_lookup_arg", "(", "date", ")", "until", "=", "self", ".", "_make_date_lookup_arg", "(", "self", ".", "_get_next_month", "(", "date", ")", ")", "lookup_kwargs", "=", "{", "'%s__gte'", "%", "date_field", ":", "since", ",", "'%s__lt'", "%", "date_field", ":", "until", ",", "}", "qs", "=", "self", ".", "get_dated_queryset", "(", "*", "*", "lookup_kwargs", ")", "date_list", "=", "self", ".", "get_date_list", "(", "qs", ")", "return", "(", "date_list", ",", "qs", ",", "{", "'month'", ":", "date", ",", "'next_month'", ":", "self", ".", "get_next_month", "(", "date", ")", ",", "'previous_month'", ":", "self", ".", "get_previous_month", "(", "date", ")", ",", "}", ")" ]
[ 446, 4 ]
[ 469, 10 ]
python
en
['en', 'en', 'en']
True
BaseWeekArchiveView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    year = self.get_year()
    week = self.get_week()

    date_field = self.get_date_field()
    week_format = self.get_week_format()
    week_choices = {'%W': '1', '%U': '0', '%V': '1'}
    try:
        week_start = week_choices[week_format]
    except KeyError:
        raise ValueError('Unknown week format %r. Choices are: %s' % (
            week_format,
            ', '.join(sorted(week_choices)),
        ))
    year_format = self.get_year_format()
    if week_format == '%V' and year_format != '%G':
        raise ValueError(
            "ISO week directive '%s' is incompatible with the year "
            "directive '%s'. Use the ISO year '%%G' instead." % (
                week_format, year_format,
            )
        )
    date = _date_from_string(year, year_format, week_start, '%w', week, week_format)
    since = self._make_date_lookup_arg(date)
    until = self._make_date_lookup_arg(self._get_next_week(date))
    lookup_kwargs = {
        '%s__gte' % date_field: since,
        '%s__lt' % date_field: until,
    }

    qs = self.get_dated_queryset(**lookup_kwargs)

    return (None, qs, {
        'week': date,
        'next_week': self.get_next_week(date),
        'previous_week': self.get_previous_week(date),
    })
[ "def", "get_dated_items", "(", "self", ")", ":", "year", "=", "self", ".", "get_year", "(", ")", "week", "=", "self", ".", "get_week", "(", ")", "date_field", "=", "self", ".", "get_date_field", "(", ")", "week_format", "=", "self", ".", "get_week_format", "(", ")", "week_choices", "=", "{", "'%W'", ":", "'1'", ",", "'%U'", ":", "'0'", ",", "'%V'", ":", "'1'", "}", "try", ":", "week_start", "=", "week_choices", "[", "week_format", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Unknown week format %r. Choices are: %s'", "%", "(", "week_format", ",", "', '", ".", "join", "(", "sorted", "(", "week_choices", ")", ")", ",", ")", ")", "year_format", "=", "self", ".", "get_year_format", "(", ")", "if", "week_format", "==", "'%V'", "and", "year_format", "!=", "'%G'", ":", "raise", "ValueError", "(", "\"ISO week directive '%s' is incompatible with the year \"", "\"directive '%s'. Use the ISO year '%%G' instead.\"", "%", "(", "week_format", ",", "year_format", ",", ")", ")", "date", "=", "_date_from_string", "(", "year", ",", "year_format", ",", "week_start", ",", "'%w'", ",", "week", ",", "week_format", ")", "since", "=", "self", ".", "_make_date_lookup_arg", "(", "date", ")", "until", "=", "self", ".", "_make_date_lookup_arg", "(", "self", ".", "_get_next_week", "(", "date", ")", ")", "lookup_kwargs", "=", "{", "'%s__gte'", "%", "date_field", ":", "since", ",", "'%s__lt'", "%", "date_field", ":", "until", ",", "}", "qs", "=", "self", ".", "get_dated_queryset", "(", "*", "*", "lookup_kwargs", ")", "return", "(", "None", ",", "qs", ",", "{", "'week'", ":", "date", ",", "'next_week'", ":", "self", ".", "get_next_week", "(", "date", ")", ",", "'previous_week'", ":", "self", ".", "get_previous_week", "(", "date", ")", ",", "}", ")" ]
[ 480, 4 ]
[ 517, 10 ]
python
en
['en', 'en', 'en']
True
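_date_from_string ultimately delegates to datetime.datetime.strptime; a stdlib sketch of how year, week start, and week number resolve to a date under the non-ISO '%W' format (the sample values are assumptions):

import datetime

# year via '%Y', week_start '1' via '%w', week '22' via '%W'
date = datetime.datetime.strptime('2021 1 22', '%Y %w %W').date()
print(date)  # 2021-05-31, the Monday that starts week 22 of 2021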
BaseDayArchiveView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    year = self.get_year()
    month = self.get_month()
    day = self.get_day()

    date = _date_from_string(year, self.get_year_format(),
                             month, self.get_month_format(),
                             day, self.get_day_format())

    return self._get_dated_items(date)
[ "def", "get_dated_items", "(", "self", ")", ":", "year", "=", "self", ".", "get_year", "(", ")", "month", "=", "self", ".", "get_month", "(", ")", "day", "=", "self", ".", "get_day", "(", ")", "date", "=", "_date_from_string", "(", "year", ",", "self", ".", "get_year_format", "(", ")", ",", "month", ",", "self", ".", "get_month_format", "(", ")", ",", "day", ",", "self", ".", "get_day_format", "(", ")", ")", "return", "self", ".", "_get_dated_items", "(", "date", ")" ]
[ 527, 4 ]
[ 537, 42 ]
python
en
['en', 'en', 'en']
True
BaseDayArchiveView._get_dated_items
(self, date)
Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial.
Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial.
def _get_dated_items(self, date):
    """
    Do the actual heavy lifting of getting the dated items; this accepts a
    date object so that TodayArchiveView can be trivial.
    """
    lookup_kwargs = self._make_single_date_lookup(date)
    qs = self.get_dated_queryset(**lookup_kwargs)

    return (None, qs, {
        'day': date,
        'previous_day': self.get_previous_day(date),
        'next_day': self.get_next_day(date),
        'previous_month': self.get_previous_month(date),
        'next_month': self.get_next_month(date)
    })
[ "def", "_get_dated_items", "(", "self", ",", "date", ")", ":", "lookup_kwargs", "=", "self", ".", "_make_single_date_lookup", "(", "date", ")", "qs", "=", "self", ".", "get_dated_queryset", "(", "*", "*", "lookup_kwargs", ")", "return", "(", "None", ",", "qs", ",", "{", "'day'", ":", "date", ",", "'previous_day'", ":", "self", ".", "get_previous_day", "(", "date", ")", ",", "'next_day'", ":", "self", ".", "get_next_day", "(", "date", ")", ",", "'previous_month'", ":", "self", ".", "get_previous_month", "(", "date", ")", ",", "'next_month'", ":", "self", ".", "get_next_month", "(", "date", ")", "}", ")" ]
[ 539, 4 ]
[ 553, 10 ]
python
en
['en', 'error', 'th']
False
BaseTodayArchiveView.get_dated_items
(self)
Return (date_list, items, extra_context) for this request.
Return (date_list, items, extra_context) for this request.
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    return self._get_dated_items(datetime.date.today())
[ "def", "get_dated_items", "(", "self", ")", ":", "return", "self", ".", "_get_dated_items", "(", "datetime", ".", "date", ".", "today", "(", ")", ")" ]
[ 564, 4 ]
[ 566, 59 ]
python
en
['en', 'en', 'en']
True
BaseDateDetailView.get_object
(self, queryset=None)
Get the object this request displays.
Get the object this request displays.
def get_object(self, queryset=None):
    """Get the object this request displays."""
    year = self.get_year()
    month = self.get_month()
    day = self.get_day()
    date = _date_from_string(year, self.get_year_format(),
                             month, self.get_month_format(),
                             day, self.get_day_format())

    # Use a custom queryset if provided
    qs = self.get_queryset() if queryset is None else queryset

    if not self.get_allow_future() and date > datetime.date.today():
        raise Http404(_(
            "Future %(verbose_name_plural)s not available because "
            "%(class_name)s.allow_future is False."
        ) % {
            'verbose_name_plural': qs.model._meta.verbose_name_plural,
            'class_name': self.__class__.__name__,
        })

    # Filter down a queryset from self.queryset using the date from the
    # URL. This'll get passed as the queryset to DetailView.get_object,
    # which'll handle the 404
    lookup_kwargs = self._make_single_date_lookup(date)
    qs = qs.filter(**lookup_kwargs)

    return super().get_object(queryset=qs)
[ "def", "get_object", "(", "self", ",", "queryset", "=", "None", ")", ":", "year", "=", "self", ".", "get_year", "(", ")", "month", "=", "self", ".", "get_month", "(", ")", "day", "=", "self", ".", "get_day", "(", ")", "date", "=", "_date_from_string", "(", "year", ",", "self", ".", "get_year_format", "(", ")", ",", "month", ",", "self", ".", "get_month_format", "(", ")", ",", "day", ",", "self", ".", "get_day_format", "(", ")", ")", "# Use a custom queryset if provided", "qs", "=", "self", ".", "get_queryset", "(", ")", "if", "queryset", "is", "None", "else", "queryset", "if", "not", "self", ".", "get_allow_future", "(", ")", "and", "date", ">", "datetime", ".", "date", ".", "today", "(", ")", ":", "raise", "Http404", "(", "_", "(", "\"Future %(verbose_name_plural)s not available because \"", "\"%(class_name)s.allow_future is False.\"", ")", "%", "{", "'verbose_name_plural'", ":", "qs", ".", "model", ".", "_meta", ".", "verbose_name_plural", ",", "'class_name'", ":", "self", ".", "__class__", ".", "__name__", ",", "}", ")", "# Filter down a queryset from self.queryset using the date from the", "# URL. This'll get passed as the queryset to DetailView.get_object,", "# which'll handle the 404", "lookup_kwargs", "=", "self", ".", "_make_single_date_lookup", "(", "date", ")", "qs", "=", "qs", ".", "filter", "(", "*", "*", "lookup_kwargs", ")", "return", "super", "(", ")", ".", "get_object", "(", "queryset", "=", "qs", ")" ]
[ 579, 4 ]
[ 606, 46 ]
python
en
['en', 'en', 'en']
True
PipProvider.get_preference
( self, identifier: str, resolutions: Mapping[str, Candidate], candidates: Mapping[str, Iterator[Candidate]], information: Mapping[str, Iterator["PreferenceInformation"]], )
Produce a sort key for given requirement based on preference. The lower the return value is, the more preferred this group of arguments is. Currently pip considers the following in order:
* Prefer if any of the known requirements is "direct", e.g. points to an explicit URL.
* If equal, prefer if any requirement is "pinned", i.e. contains operator ``===`` or ``==``.
* If equal, calculate an approximate "depth" and resolve requirements closer to the user-specified requirements first.
* Order user-specified requirements by the order they are specified.
* If equal, prefer "non-free" requirements, i.e. those containing at least one operator, such as ``>=`` or ``<``.
* If equal, order alphabetically for consistency (helps debuggability).
Produce a sort key for given requirement based on preference.
def get_preference(
    self,
    identifier: str,
    resolutions: Mapping[str, Candidate],
    candidates: Mapping[str, Iterator[Candidate]],
    information: Mapping[str, Iterator["PreferenceInformation"]],
) -> "Preference":
    """Produce a sort key for given requirement based on preference.

    The lower the return value is, the more preferred this group of
    arguments is.

    Currently pip considers the following in order:

    * Prefer if any of the known requirements is "direct", e.g. points
      to an explicit URL.
    * If equal, prefer if any requirement is "pinned", i.e. contains
      operator ``===`` or ``==``.
    * If equal, calculate an approximate "depth" and resolve requirements
      closer to the user-specified requirements first.
    * Order user-specified requirements by the order they are specified.
    * If equal, prefer "non-free" requirements, i.e. those containing at
      least one operator, such as ``>=`` or ``<``.
    * If equal, order alphabetically for consistency (helps debuggability).
    """
    lookups = (r.get_candidate_lookup() for r, _ in information[identifier])
    candidate, ireqs = zip(*lookups)
    operators = [
        specifier.operator
        for specifier_set in (ireq.specifier for ireq in ireqs if ireq)
        for specifier in specifier_set
    ]

    direct = candidate is not None
    pinned = any(op[:2] == "==" for op in operators)
    unfree = bool(operators)

    try:
        requested_order: Union[int, float] = self._user_requested[identifier]
    except KeyError:
        requested_order = math.inf
        parent_depths = (
            self._known_depths[parent.name] if parent is not None else 0.0
            for _, parent in information[identifier]
        )
        inferred_depth = min(d for d in parent_depths) + 1.0
        self._known_depths[identifier] = inferred_depth
    else:
        inferred_depth = 1.0

    requested_order = self._user_requested.get(identifier, math.inf)

    # Requires-Python has only one candidate and the check is basically
    # free, so we always do it first to avoid needless work if it fails.
    requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER

    # HACK: Setuptools has a very long and solid backward compatibility
    # track record, and extremely few projects would request a narrow,
    # non-recent version range of it since that would break a lot of
    # things. (Most projects specify it only to request an installer
    # feature, which does not work, but that's another topic.)
    # Intentionally delaying Setuptools helps reduce branches the
    # resolver has to check. This serves as a temporary fix for issues
    # like "apache-airflow[all]" while we work on "proper" branch
    # pruning techniques.
    delay_this = identifier == "setuptools"

    return (
        not requires_python,
        delay_this,
        not direct,
        not pinned,
        inferred_depth,
        requested_order,
        not unfree,
        identifier,
    )
[ "def", "get_preference", "(", "self", ",", "identifier", ":", "str", ",", "resolutions", ":", "Mapping", "[", "str", ",", "Candidate", "]", ",", "candidates", ":", "Mapping", "[", "str", ",", "Iterator", "[", "Candidate", "]", "]", ",", "information", ":", "Mapping", "[", "str", ",", "Iterator", "[", "\"PreferenceInformation\"", "]", "]", ",", ")", "->", "\"Preference\"", ":", "lookups", "=", "(", "r", ".", "get_candidate_lookup", "(", ")", "for", "r", ",", "_", "in", "information", "[", "identifier", "]", ")", "candidate", ",", "ireqs", "=", "zip", "(", "*", "lookups", ")", "operators", "=", "[", "specifier", ".", "operator", "for", "specifier_set", "in", "(", "ireq", ".", "specifier", "for", "ireq", "in", "ireqs", "if", "ireq", ")", "for", "specifier", "in", "specifier_set", "]", "direct", "=", "candidate", "is", "not", "None", "pinned", "=", "any", "(", "op", "[", ":", "2", "]", "==", "\"==\"", "for", "op", "in", "operators", ")", "unfree", "=", "bool", "(", "operators", ")", "try", ":", "requested_order", ":", "Union", "[", "int", ",", "float", "]", "=", "self", ".", "_user_requested", "[", "identifier", "]", "except", "KeyError", ":", "requested_order", "=", "math", ".", "inf", "parent_depths", "=", "(", "self", ".", "_known_depths", "[", "parent", ".", "name", "]", "if", "parent", "is", "not", "None", "else", "0.0", "for", "_", ",", "parent", "in", "information", "[", "identifier", "]", ")", "inferred_depth", "=", "min", "(", "d", "for", "d", "in", "parent_depths", ")", "+", "1.0", "self", ".", "_known_depths", "[", "identifier", "]", "=", "inferred_depth", "else", ":", "inferred_depth", "=", "1.0", "requested_order", "=", "self", ".", "_user_requested", ".", "get", "(", "identifier", ",", "math", ".", "inf", ")", "# Requires-Python has only one candidate and the check is basically", "# free, so we always do it first to avoid needless work if it fails.", "requires_python", "=", "identifier", "==", "REQUIRES_PYTHON_IDENTIFIER", "# HACK: Setuptools have a very long and solid backward compatibility", "# track record, and extremely few projects would request a narrow,", "# non-recent version range of it since that would break a lot things.", "# (Most projects specify it only to request for an installer feature,", "# which does not work, but that's another topic.) Intentionally", "# delaying Setuptools helps reduce branches the resolver has to check.", "# This serves as a temporary fix for issues like \"apache-airlfow[all]\"", "# while we work on \"proper\" branch pruning techniques.", "delay_this", "=", "identifier", "==", "\"setuptools\"", "return", "(", "not", "requires_python", ",", "delay_this", ",", "not", "direct", ",", "not", "pinned", ",", "inferred_depth", ",", "requested_order", ",", "not", "unfree", ",", "identifier", ",", ")" ]
[ 68, 4 ]
[ 143, 9 ]
python
en
['en', 'en', 'en']
True
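An illustration (not pip's code) of why the returned tuple works as a sort key: tuples compare element-wise and False sorts before True, so pinned, shallow, user-requested names win, with the identifier as the final alphabetical tie-break. The package names and values below are made up:

import math

# (not requires_python, delay_this, not direct, not pinned,
#  inferred_depth, requested_order, not unfree, identifier)
prefs = {
    'requests': (True, False, True, False, 1.0, 0, False, 'requests'),
    'chardet':  (True, False, True, False, 2.0, math.inf, False, 'chardet'),
    'idna':     (True, False, True, True,  2.0, math.inf, False, 'idna'),
}
print(sorted(prefs, key=prefs.get))  # ['requests', 'chardet', 'idna']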
opener_for
(ca_bundle=None)
Get a urlopen() replacement that uses ca_bundle for verification
Get a urlopen() replacement that uses ca_bundle for verification
def opener_for(ca_bundle=None):
    """Get a urlopen() replacement that uses ca_bundle for verification"""
    return urllib.request.build_opener(
        VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
    ).open
[ "def", "opener_for", "(", "ca_bundle", "=", "None", ")", ":", "return", "urllib", ".", "request", ".", "build_opener", "(", "VerifyingHTTPSHandler", "(", "ca_bundle", "or", "find_ca_bundle", "(", ")", ")", ")", ".", "open" ]
[ 204, 0 ]
[ 208, 10 ]
python
en
['en', 'en', 'en']
True
find_ca_bundle
()
Return an existing CA bundle path, or None
Return an existing CA bundle path, or None
def find_ca_bundle():
    """Return an existing CA bundle path, or None"""
    extant_cert_paths = filter(os.path.isfile, cert_paths)
    return (
        get_win_certfile()
        or next(extant_cert_paths, None)
        or _certifi_where()
    )
[ "def", "find_ca_bundle", "(", ")", ":", "extant_cert_paths", "=", "filter", "(", "os", ".", "path", ".", "isfile", ",", "cert_paths", ")", "return", "(", "get_win_certfile", "(", ")", "or", "next", "(", "extant_cert_paths", ",", "None", ")", "or", "_certifi_where", "(", ")", ")" ]
[ 245, 0 ]
[ 252, 5 ]
python
en
['en', 'en', 'en']
True
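The next(iterator, default) call is what lets the or-chain above fall through cleanly; a minimal stdlib sketch of that idiom with sample paths (the real cert_paths list lives in the module):

import os

cert_paths = ['/etc/pki/tls/certs/ca-bundle.crt',
              '/etc/ssl/certs/ca-certificates.crt']  # sample candidates
extant_cert_paths = filter(os.path.isfile, cert_paths)
print(next(extant_cert_paths, None))  # first existing path, else None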
client_with_credentials
(app)
This fixture provides a Flask app test client that has a session pre-configured with user credentials.
This fixture provides a Flask app test client that has a session pre-configured with user credentials.
def client_with_credentials(app):
    """This fixture provides a Flask app test client that has a session
    pre-configured with user credentials."""
    credentials = OAuth2Credentials(
        'access_token', 'client_id', 'client_secret',
        'refresh_token', '3600', None, 'Test',
        id_token={'sub': '123', 'email': '[email protected]'},
        scopes=('email', 'profile'))

    @contextlib.contextmanager
    def inner():
        with app.test_client() as client:
            with client.session_transaction() as session:
                session['profile'] = {
                    'email': '[email protected]',
                    'name': 'Test User'
                }
                session['google_oauth2_credentials'] = credentials.to_json()
            yield client

    return inner
[ "def", "client_with_credentials", "(", "app", ")", ":", "credentials", "=", "OAuth2Credentials", "(", "'access_token'", ",", "'client_id'", ",", "'client_secret'", ",", "'refresh_token'", ",", "'3600'", ",", "None", ",", "'Test'", ",", "id_token", "=", "{", "'sub'", ":", "'123'", ",", "'email'", ":", "'[email protected]'", "}", ",", "scopes", "=", "(", "'email'", ",", "'profile'", ")", ")", "@", "contextlib", ".", "contextmanager", "def", "inner", "(", ")", ":", "with", "app", ".", "test_client", "(", ")", "as", "client", ":", "with", "client", ".", "session_transaction", "(", ")", "as", "session", ":", "session", "[", "'profile'", "]", "=", "{", "'email'", ":", "'[email protected]'", ",", "'name'", ":", "'Test User'", "}", "session", "[", "'google_oauth2_credentials'", "]", "=", "credentials", ".", "to_json", "(", ")", "yield", "client", "return", "inner" ]
[ 25, 0 ]
[ 50, 16 ]
python
en
['en', 'en', 'en']
True
Menu.run
(self)
Display the menu and respond to choices
Display the menu and respond to choices
def run(self):
    "Display the menu and respond to choices"
    while True:
        self.display_menu()
        choice = raw_input("> ")
        action = self.choices.get(choice)
        if action:
            action()
        else:
            print "{0} is not a valid choice".format(choice)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "self", ".", "display_menu", "(", ")", "choice", "=", "raw_input", "(", "\"> \"", ")", "action", "=", "self", ".", "choices", ".", "get", "(", "choice", ")", "if", "action", ":", "action", "(", ")", "else", ":", "print", "\"{0} is not a valid choice\"", ".", "format", "(", "choice", ")" ]
[ 24, 4 ]
[ 33, 64 ]
python
en
['en', 'en', 'en']
True
_load_client_secrets
(filename)
Loads client secrets from the given filename. Args: filename: The name of the file containing the JSON secret key. Returns: A 2-tuple, the first item containing the client id, and the second item containing a client secret.
Loads client secrets from the given filename.
def _load_client_secrets(filename):
    """Loads client secrets from the given filename.

    Args:
        filename: The name of the file containing the JSON secret key.

    Returns:
        A 2-tuple, the first item containing the client id, and the second
        item containing a client secret.
    """
    client_type, client_info = clientsecrets.loadfile(filename)
    if client_type != clientsecrets.TYPE_WEB:
        raise ValueError(
            'The flow specified in {} is not supported, only the WEB flow '
            'type is supported.'.format(client_type))
    return client_info['client_id'], client_info['client_secret']
[ "def", "_load_client_secrets", "(", "filename", ")", ":", "client_type", ",", "client_info", "=", "clientsecrets", ".", "loadfile", "(", "filename", ")", "if", "client_type", "!=", "clientsecrets", ".", "TYPE_WEB", ":", "raise", "ValueError", "(", "'The flow specified in {} is not supported, only the WEB flow '", "'type is supported.'", ".", "format", "(", "client_type", ")", ")", "return", "client_info", "[", "'client_id'", "]", ",", "client_info", "[", "'client_secret'", "]" ]
[ 244, 0 ]
[ 260, 65 ]
python
en
['en', 'en', 'en']
True
_get_oauth2_client_id_and_secret
(settings_instance)
Initializes client id and client secret based on the settings. Args: settings_instance: An instance of ``django.conf.settings``. Returns: A 2-tuple, the first item is the client id and the second item is the client secret.
Initializes client id and client secret based on the settings.
def _get_oauth2_client_id_and_secret(settings_instance):
    """Initializes client id and client secret based on the settings.

    Args:
        settings_instance: An instance of ``django.conf.settings``.

    Returns:
        A 2-tuple, the first item is the client id and the second
        item is the client secret.
    """
    secret_json = getattr(settings_instance,
                          'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
    if secret_json is not None:
        return _load_client_secrets(secret_json)
    else:
        client_id = getattr(settings_instance,
                            "GOOGLE_OAUTH2_CLIENT_ID", None)
        client_secret = getattr(settings_instance,
                                "GOOGLE_OAUTH2_CLIENT_SECRET", None)
        if client_id is not None and client_secret is not None:
            return client_id, client_secret
        else:
            raise exceptions.ImproperlyConfigured(
                "Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
                "both GOOGLE_OAUTH2_CLIENT_ID and "
                "GOOGLE_OAUTH2_CLIENT_SECRET in settings.py")
[ "def", "_get_oauth2_client_id_and_secret", "(", "settings_instance", ")", ":", "secret_json", "=", "getattr", "(", "settings_instance", ",", "'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON'", ",", "None", ")", "if", "secret_json", "is", "not", "None", ":", "return", "_load_client_secrets", "(", "secret_json", ")", "else", ":", "client_id", "=", "getattr", "(", "settings_instance", ",", "\"GOOGLE_OAUTH2_CLIENT_ID\"", ",", "None", ")", "client_secret", "=", "getattr", "(", "settings_instance", ",", "\"GOOGLE_OAUTH2_CLIENT_SECRET\"", ",", "None", ")", "if", "client_id", "is", "not", "None", "and", "client_secret", "is", "not", "None", ":", "return", "client_id", ",", "client_secret", "else", ":", "raise", "exceptions", ".", "ImproperlyConfigured", "(", "\"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or \"", "\"both GOOGLE_OAUTH2_CLIENT_ID and \"", "\"GOOGLE_OAUTH2_CLIENT_SECRET in settings.py\"", ")" ]
[ 263, 0 ]
[ 288, 61 ]
python
en
['en', 'en', 'en']
True
_get_storage_model
()
This configures whether the credentials will be stored in the session or the Django ORM based on the settings. By default, the credentials will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL` is found in the settings. Usually, the ORM storage is used to integrate credentials into an existing Django user system. Returns: A tuple containing three strings, or None. If ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple will contain the fully qualified path of the `django.db.model`, the name of the ``django.contrib.auth.models.User`` field on the model, and the name of the :class:`oauth2client.contrib.django_util.models.CredentialsField` field on the model. If Django ORM storage is not configured, this function returns None.
This configures whether the credentials will be stored in the session or the Django ORM based on the settings. By default, the credentials will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL` is found in the settings. Usually, the ORM storage is used to integrate credentials into an existing Django user system.
def _get_storage_model():
    """This configures whether the credentials will be stored in the session
    or the Django ORM based on the settings. By default, the credentials
    will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`
    is found in the settings. Usually, the ORM storage is used to integrate
    credentials into an existing Django user system.

    Returns:
        A tuple containing three strings, or None. If
        ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple will
        contain the fully qualified path of the `django.db.model`, the name
        of the ``django.contrib.auth.models.User`` field on the model, and
        the name of the
        :class:`oauth2client.contrib.django_util.models.CredentialsField`
        field on the model. If Django ORM storage is not configured, this
        function returns None.
    """
    storage_model_settings = getattr(django.conf.settings,
                                     'GOOGLE_OAUTH2_STORAGE_MODEL', None)
    if storage_model_settings is not None:
        return (storage_model_settings['model'],
                storage_model_settings['user_property'],
                storage_model_settings['credentials_property'])
    else:
        return None, None, None
[ "def", "_get_storage_model", "(", ")", ":", "storage_model_settings", "=", "getattr", "(", "django", ".", "conf", ".", "settings", ",", "'GOOGLE_OAUTH2_STORAGE_MODEL'", ",", "None", ")", "if", "storage_model_settings", "is", "not", "None", ":", "return", "(", "storage_model_settings", "[", "'model'", "]", ",", "storage_model_settings", "[", "'user_property'", "]", ",", "storage_model_settings", "[", "'credentials_property'", "]", ")", "else", ":", "return", "None", ",", "None", ",", "None" ]
[ 291, 0 ]
[ 315, 31 ]
python
en
['en', 'en', 'en']
True
get_storage
(request)
Gets a Credentials storage object provided by the Django OAuth2 Helper object. Args: request: Reference to the current request object. Returns: An :class:`oauth2.client.Storage` object.
Gets a Credentials storage object provided by the Django OAuth2 Helper object.
def get_storage(request):
    """Gets a Credentials storage object provided by the Django OAuth2
    Helper object.

    Args:
        request: Reference to the current request object.

    Returns:
        An :class:`oauth2.client.Storage` object.
    """
    storage_model = oauth2_settings.storage_model
    user_property = oauth2_settings.storage_model_user_property
    credentials_property = oauth2_settings.storage_model_credentials_property

    if storage_model:
        module_name, class_name = storage_model.rsplit('.', 1)
        module = importlib.import_module(module_name)
        storage_model_class = getattr(module, class_name)
        return storage.DjangoORMStorage(storage_model_class,
                                        user_property,
                                        request.user,
                                        credentials_property)
    else:
        # use session
        return dictionary_storage.DictionaryStorage(
            request.session, key=_CREDENTIALS_KEY)
[ "def", "get_storage", "(", "request", ")", ":", "storage_model", "=", "oauth2_settings", ".", "storage_model", "user_property", "=", "oauth2_settings", ".", "storage_model_user_property", "credentials_property", "=", "oauth2_settings", ".", "storage_model_credentials_property", "if", "storage_model", ":", "module_name", ",", "class_name", "=", "storage_model", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "storage_model_class", "=", "getattr", "(", "module", ",", "class_name", ")", "return", "storage", ".", "DjangoORMStorage", "(", "storage_model_class", ",", "user_property", ",", "request", ".", "user", ",", "credentials_property", ")", "else", ":", "# use session", "return", "dictionary_storage", ".", "DictionaryStorage", "(", "request", ".", "session", ",", "key", "=", "_CREDENTIALS_KEY", ")" ]
[ 369, 0 ]
[ 394, 50 ]
python
en
['en', 'en', 'en']
True
_redirect_with_params
(url_name, *args, **kwargs)
Helper method to create a redirect response with URL params. This builds a redirect string that converts kwargs into a query string. Args: url_name: The name of the url to redirect to. kwargs: the query string params and their values to build. Returns: A properly formatted redirect string.
Helper method to create a redirect response with URL params.
def _redirect_with_params(url_name, *args, **kwargs):
    """Helper method to create a redirect response with URL params.

    This builds a redirect string that converts kwargs into a
    query string.

    Args:
        url_name: The name of the url to redirect to.
        kwargs: the query string params and their values to build.

    Returns:
        A properly formatted redirect string.
    """
    url = urlresolvers.reverse(url_name, args=args)
    params = parse.urlencode(kwargs, True)
    return "{0}?{1}".format(url, params)
[ "def", "_redirect_with_params", "(", "url_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url", "=", "urlresolvers", ".", "reverse", "(", "url_name", ",", "args", "=", "args", ")", "params", "=", "parse", ".", "urlencode", "(", "kwargs", ",", "True", ")", "return", "\"{0}?{1}\"", ".", "format", "(", "url", ",", "params", ")" ]
[ 397, 0 ]
[ 412, 40 ]
python
en
['en', 'en', 'en']
True
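A sketch of the query-string half in isolation; parse.urlencode with doseq=True expands sequence values into repeated parameters. The URL below is hypothetical, standing in for the reversed url_name:

from urllib import parse

url = '/oauth2/oauth2authorize'  # hypothetical result of urlresolvers.reverse()
params = parse.urlencode({'return_url': '/', 'scopes': ['email', 'profile']}, True)
print('{0}?{1}'.format(url, params))
# /oauth2/oauth2authorize?return_url=%2F&scopes=email&scopes=profile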
_credentials_from_request
(request)
Gets the authorized credentials for this flow, if they exist.
Gets the authorized credentials for this flow, if they exist.
def _credentials_from_request(request):
    """Gets the authorized credentials for this flow, if they exist."""
    # ORM storage requires a logged in user
    if (oauth2_settings.storage_model is None or
            request.user.is_authenticated()):
        return get_storage(request).get()
    else:
        return None
[ "def", "_credentials_from_request", "(", "request", ")", ":", "# ORM storage requires a logged in user", "if", "(", "oauth2_settings", ".", "storage_model", "is", "None", "or", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "get_storage", "(", "request", ")", ".", "get", "(", ")", "else", ":", "return", "None" ]
[ 415, 0 ]
[ 422, 19 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.__init__
(self, request, scopes=None, return_url=None)
Initialize the Oauth2 Object. Args: request: Django request object. scopes: Scopes desired for this OAuth2 flow. return_url: The url to return to after the OAuth flow is complete, defaults to the request's current URL path.
Initialize the Oauth2 Object.
def __init__(self, request, scopes=None, return_url=None):
    """Initialize the Oauth2 Object.

    Args:
        request: Django request object.
        scopes: Scopes desired for this OAuth2 flow.
        return_url: The url to return to after the OAuth flow is complete,
            defaults to the request's current URL path.
    """
    self.request = request
    self.return_url = return_url or request.get_full_path()
    if scopes:
        self._scopes = set(oauth2_settings.scopes) | set(scopes)
    else:
        self._scopes = set(oauth2_settings.scopes)
[ "def", "__init__", "(", "self", ",", "request", ",", "scopes", "=", "None", ",", "return_url", "=", "None", ")", ":", "self", ".", "request", "=", "request", "self", ".", "return_url", "=", "return_url", "or", "request", ".", "get_full_path", "(", ")", "if", "scopes", ":", "self", ".", "_scopes", "=", "set", "(", "oauth2_settings", ".", "scopes", ")", "|", "set", "(", "scopes", ")", "else", ":", "self", ".", "_scopes", "=", "set", "(", "oauth2_settings", ".", "scopes", ")" ]
[ 430, 4 ]
[ 444, 54 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.get_authorize_redirect
(self)
Creates a URL to start the OAuth2 authorization flow.
Creates a URL to start the OAuth2 authorization flow.
def get_authorize_redirect(self):
    """Creates a URL to start the OAuth2 authorization flow."""
    get_params = {
        'return_url': self.return_url,
        'scopes': self._get_scopes()
    }

    return _redirect_with_params('google_oauth:authorize', **get_params)
[ "def", "get_authorize_redirect", "(", "self", ")", ":", "get_params", "=", "{", "'return_url'", ":", "self", ".", "return_url", ",", "'scopes'", ":", "self", ".", "_get_scopes", "(", ")", "}", "return", "_redirect_with_params", "(", "'google_oauth:authorize'", ",", "*", "*", "get_params", ")" ]
[ 446, 4 ]
[ 453, 76 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.has_credentials
(self)
Returns True if there are valid credentials for the current user and required scopes.
Returns True if there are valid credentials for the current user and required scopes.
def has_credentials(self):
    """Returns True if there are valid credentials for the current user
    and required scopes."""
    credentials = _credentials_from_request(self.request)
    return (credentials and not credentials.invalid and
            credentials.has_scopes(self._get_scopes()))
[ "def", "has_credentials", "(", "self", ")", ":", "credentials", "=", "_credentials_from_request", "(", "self", ".", "request", ")", "return", "(", "credentials", "and", "not", "credentials", ".", "invalid", "and", "credentials", ".", "has_scopes", "(", "self", ".", "_get_scopes", "(", ")", ")", ")" ]
[ 455, 4 ]
[ 460, 59 ]
python
en
['en', 'en', 'en']
True
UserOAuth2._get_scopes
(self)
Returns the scopes associated with this object, kept up to date for incremental auth.
Returns the scopes associated with this object, kept up to date for incremental auth.
def _get_scopes(self):
    """Returns the scopes associated with this object, kept up to
    date for incremental auth."""
    if _credentials_from_request(self.request):
        return (self._scopes |
                _credentials_from_request(self.request).scopes)
    else:
        return self._scopes
[ "def", "_get_scopes", "(", "self", ")", ":", "if", "_credentials_from_request", "(", "self", ".", "request", ")", ":", "return", "(", "self", ".", "_scopes", "|", "_credentials_from_request", "(", "self", ".", "request", ")", ".", "scopes", ")", "else", ":", "return", "self", ".", "_scopes" ]
[ 462, 4 ]
[ 469, 31 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.scopes
(self)
Returns the scopes associated with this OAuth2 object.
Returns the scopes associated with this OAuth2 object.
def scopes(self):
    """Returns the scopes associated with this OAuth2 object."""
    # make sure previously requested custom scopes are maintained
    # in future authorizations
    return self._get_scopes()
[ "def", "scopes", "(", "self", ")", ":", "# make sure previously requested custom scopes are maintained", "# in future authorizations", "return", "self", ".", "_get_scopes", "(", ")" ]
[ 472, 4 ]
[ 476, 33 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.credentials
(self)
Gets the authorized credentials for this flow, if they exist.
Gets the authorized credentials for this flow, if they exist.
def credentials(self):
    """Gets the authorized credentials for this flow, if they exist."""
    return _credentials_from_request(self.request)
[ "def", "credentials", "(", "self", ")", ":", "return", "_credentials_from_request", "(", "self", ".", "request", ")" ]
[ 479, 4 ]
[ 481, 54 ]
python
en
['en', 'en', 'en']
True
UserOAuth2.http
(self)
Helper: create HTTP client authorized with OAuth2 credentials.
Helper: create HTTP client authorized with OAuth2 credentials.
def http(self):
    """Helper: create HTTP client authorized with OAuth2 credentials."""
    if self.has_credentials():
        return self.credentials.authorize(transport.get_http_object())
    return None
[ "def", "http", "(", "self", ")", ":", "if", "self", ".", "has_credentials", "(", ")", ":", "return", "self", ".", "credentials", ".", "authorize", "(", "transport", ".", "get_http_object", "(", ")", ")", "return", "None" ]
[ 484, 4 ]
[ 488, 19 ]
python
en
['en', 'en', 'en']
True
keygen
()
Key generator.
Key generator.
def keygen():
    """Key generator."""

    # Parse the CLI options
    parser = OptionParser(usage='usage: %prog [options] keysize',
                          description='Generates a new RSA keypair of "keysize" bits.')

    parser.add_option('--pubout', type='string',
                      help='Output filename for the public key. The public key is '
                           'not saved if this option is not present. You can use '
                           'pyrsa-priv2pub to create the public key file later.')

    parser.add_option('-o', '--out', type='string',
                      help='Output filename for the private key. The key is '
                           'written to stdout if this option is not present.')

    parser.add_option('--form',
                      help='key format of the private and public keys - default PEM',
                      choices=('PEM', 'DER'), default='PEM')

    (cli, cli_args) = parser.parse_args(sys.argv[1:])

    if len(cli_args) != 1:
        parser.print_help()
        raise SystemExit(1)

    try:
        keysize = int(cli_args[0])
    except ValueError:
        parser.print_help()
        print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
        raise SystemExit(1)

    print('Generating %i-bit key' % keysize, file=sys.stderr)
    (pub_key, priv_key) = rsa.newkeys(keysize)

    # Save public key
    if cli.pubout:
        print('Writing public key to %s' % cli.pubout, file=sys.stderr)
        data = pub_key.save_pkcs1(format=cli.form)
        with open(cli.pubout, 'wb') as outfile:
            outfile.write(data)

    # Save private key
    data = priv_key.save_pkcs1(format=cli.form)

    if cli.out:
        print('Writing private key to %s' % cli.out, file=sys.stderr)
        with open(cli.out, 'wb') as outfile:
            outfile.write(data)
    else:
        print('Writing private key to stdout', file=sys.stderr)
        rsa._compat.write_to_stdout(data)
[ "def", "keygen", "(", ")", ":", "# Parse the CLI options", "parser", "=", "OptionParser", "(", "usage", "=", "'usage: %prog [options] keysize'", ",", "description", "=", "'Generates a new RSA keypair of \"keysize\" bits.'", ")", "parser", ".", "add_option", "(", "'--pubout'", ",", "type", "=", "'string'", ",", "help", "=", "'Output filename for the public key. The public key is '", "'not saved if this option is not present. You can use '", "'pyrsa-priv2pub to create the public key file later.'", ")", "parser", ".", "add_option", "(", "'-o'", ",", "'--out'", ",", "type", "=", "'string'", ",", "help", "=", "'Output filename for the private key. The key is '", "'written to stdout if this option is not present.'", ")", "parser", ".", "add_option", "(", "'--form'", ",", "help", "=", "'key format of the private and public keys - default PEM'", ",", "choices", "=", "(", "'PEM'", ",", "'DER'", ")", ",", "default", "=", "'PEM'", ")", "(", "cli", ",", "cli_args", ")", "=", "parser", ".", "parse_args", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "if", "len", "(", "cli_args", ")", "!=", "1", ":", "parser", ".", "print_help", "(", ")", "raise", "SystemExit", "(", "1", ")", "try", ":", "keysize", "=", "int", "(", "cli_args", "[", "0", "]", ")", "except", "ValueError", ":", "parser", ".", "print_help", "(", ")", "print", "(", "'Not a valid number: %s'", "%", "cli_args", "[", "0", "]", ",", "file", "=", "sys", ".", "stderr", ")", "raise", "SystemExit", "(", "1", ")", "print", "(", "'Generating %i-bit key'", "%", "keysize", ",", "file", "=", "sys", ".", "stderr", ")", "(", "pub_key", ",", "priv_key", ")", "=", "rsa", ".", "newkeys", "(", "keysize", ")", "# Save public key", "if", "cli", ".", "pubout", ":", "print", "(", "'Writing public key to %s'", "%", "cli", ".", "pubout", ",", "file", "=", "sys", ".", "stderr", ")", "data", "=", "pub_key", ".", "save_pkcs1", "(", "format", "=", "cli", ".", "form", ")", "with", "open", "(", "cli", ".", "pubout", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "data", ")", "# Save private key", "data", "=", "priv_key", ".", "save_pkcs1", "(", "format", "=", "cli", ".", "form", ")", "if", "cli", ".", "out", ":", "print", "(", "'Writing private key to %s'", "%", "cli", ".", "out", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "cli", ".", "out", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "data", ")", "else", ":", "print", "(", "'Writing private key to stdout'", ",", "file", "=", "sys", ".", "stderr", ")", "rsa", ".", "_compat", ".", "write_to_stdout", "(", "data", ")" ]
[ 33, 0 ]
[ 85, 41 ]
python
de
['de', 'uk', 'en']
False
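For reference, the flow above can be exercised without the CLI wrapper. A minimal sketch using the same Python-RSA calls the function relies on (rsa.newkeys and save_pkcs1 are the real API; the 512-bit size and file names are illustrative only):

import rsa

# newkeys returns a (PublicKey, PrivateKey) pair
(pub_key, priv_key) = rsa.newkeys(512)

# Serialize both keys in PEM, as keygen() does for --form PEM
with open('example_pub.pem', 'wb') as outfile:    # hypothetical path
    outfile.write(pub_key.save_pkcs1(format='PEM'))
with open('example_priv.pem', 'wb') as outfile:   # hypothetical path
    outfile.write(priv_key.save_pkcs1(format='PEM'))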
CryptoOperation.perform_operation
(self, indata, key, cli_args)
Performs the program's operation. Implement in a subclass. :returns: the data to write to the output.
Performs the program's operation.
def perform_operation(self, indata, key, cli_args): """Performs the program's operation. Implement in a subclass. :returns: the data to write to the output. """
[ "def", "perform_operation", "(", "self", ",", "indata", ",", "key", ",", "cli_args", ")", ":" ]
[ 114, 4 ]
[ 120, 11 ]
python
en
['en', 'en', 'en']
True
CryptoOperation.__call__
(self)
Runs the program.
Runs the program.
def __call__(self): """Runs the program.""" (cli, cli_args) = self.parse_cli() key = self.read_key(cli_args[0], cli.keyform) indata = self.read_infile(cli.input) print(self.operation_progressive.title(), file=sys.stderr) outdata = self.perform_operation(indata, key, cli_args) if self.has_output: self.write_outfile(outdata, cli.output)
[ "def", "__call__", "(", "self", ")", ":", "(", "cli", ",", "cli_args", ")", "=", "self", ".", "parse_cli", "(", ")", "key", "=", "self", ".", "read_key", "(", "cli_args", "[", "0", "]", ",", "cli", ".", "keyform", ")", "indata", "=", "self", ".", "read_infile", "(", "cli", ".", "input", ")", "print", "(", "self", ".", "operation_progressive", ".", "title", "(", ")", ",", "file", "=", "sys", ".", "stderr", ")", "outdata", "=", "self", ".", "perform_operation", "(", "indata", ",", "key", ",", "cli_args", ")", "if", "self", ".", "has_output", ":", "self", ".", "write_outfile", "(", "outdata", ",", "cli", ".", "output", ")" ]
[ 122, 4 ]
[ 135, 51 ]
python
en
['en', 'sv', 'en']
True
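__call__ is a template method: parse_cli, read_key, read_infile and write_outfile handle the plumbing, and concrete operations only override perform_operation. A hypothetical subclass sketch under that assumption (the import path and class attributes mirror what the methods above read from self; the operation itself is invented for illustration):

import rsa
from rsa.cli import CryptoOperation  # assumed import path for the class above

class HexdumpOperation(CryptoOperation):
    """Hypothetical operation: hex-encodes the input file."""
    keyname = 'public'
    usage = 'usage: %prog [options] public_key'
    description = 'Hex-encodes a file (illustrative only).'
    operation_progressive = 'hex-encoding'
    input_help = 'Name of the file to encode. Reads from stdin if not given.'
    output_help = 'Output filename. Written to stdout if not given.'
    expected_cli_args = 1
    has_output = True
    key_class = rsa.PublicKey

    def perform_operation(self, indata, key, cli_args):
        # __call__ hands over the input contents and the loaded key; the
        # return value goes to write_outfile()
        if isinstance(indata, str):  # stdin may yield text
            indata = indata.encode('ascii')
        return indata.hex().encode('ascii')

Invoking HexdumpOperation()() then drives the whole parse/read/perform/write pipeline.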
CryptoOperation.parse_cli
(self)
Parse the CLI options :returns: (cli_opts, cli_args)
Parse the CLI options
def parse_cli(self): """Parse the CLI options :returns: (cli_opts, cli_args) """ parser = OptionParser(usage=self.usage, description=self.description) parser.add_option('-i', '--input', type='string', help=self.input_help) if self.has_output: parser.add_option('-o', '--output', type='string', help=self.output_help) parser.add_option('--keyform', help='Key format of the %s key - default PEM' % self.keyname, choices=('PEM', 'DER'), default='PEM') (cli, cli_args) = parser.parse_args(sys.argv[1:]) if len(cli_args) != self.expected_cli_args: parser.print_help() raise SystemExit(1) return cli, cli_args
[ "def", "parse_cli", "(", "self", ")", ":", "parser", "=", "OptionParser", "(", "usage", "=", "self", ".", "usage", ",", "description", "=", "self", ".", "description", ")", "parser", ".", "add_option", "(", "'-i'", ",", "'--input'", ",", "type", "=", "'string'", ",", "help", "=", "self", ".", "input_help", ")", "if", "self", ".", "has_output", ":", "parser", ".", "add_option", "(", "'-o'", ",", "'--output'", ",", "type", "=", "'string'", ",", "help", "=", "self", ".", "output_help", ")", "parser", ".", "add_option", "(", "'--keyform'", ",", "help", "=", "'Key format of the %s key - default PEM'", "%", "self", ".", "keyname", ",", "choices", "=", "(", "'PEM'", ",", "'DER'", ")", ",", "default", "=", "'PEM'", ")", "(", "cli", ",", "cli_args", ")", "=", "parser", ".", "parse_args", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "if", "len", "(", "cli_args", ")", "!=", "self", ".", "expected_cli_args", ":", "parser", ".", "print_help", "(", ")", "raise", "SystemExit", "(", "1", ")", "return", "cli", ",", "cli_args" ]
[ 137, 4 ]
[ 160, 28 ]
python
en
['en', 'en', 'en']
True
CryptoOperation.read_key
(self, filename, keyform)
Reads a public or private key.
Reads a public or private key.
def read_key(self, filename, keyform): """Reads a public or private key.""" print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr) with open(filename, 'rb') as keyfile: keydata = keyfile.read() return self.key_class.load_pkcs1(keydata, keyform)
[ "def", "read_key", "(", "self", ",", "filename", ",", "keyform", ")", ":", "print", "(", "'Reading %s key from %s'", "%", "(", "self", ".", "keyname", ",", "filename", ")", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "keyfile", ":", "keydata", "=", "keyfile", ".", "read", "(", ")", "return", "self", ".", "key_class", ".", "load_pkcs1", "(", "keydata", ",", "keyform", ")" ]
[ 162, 4 ]
[ 169, 58 ]
python
en
['en', 'en', 'en']
True
CryptoOperation.read_infile
(self, inname)
Read the input file
Read the input file
def read_infile(self, inname): """Read the input file""" if inname: print('Reading input from %s' % inname, file=sys.stderr) with open(inname, 'rb') as infile: return infile.read() print('Reading input from stdin', file=sys.stderr) return sys.stdin.read()
[ "def", "read_infile", "(", "self", ",", "inname", ")", ":", "if", "inname", ":", "print", "(", "'Reading input from %s'", "%", "inname", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "inname", ",", "'rb'", ")", "as", "infile", ":", "return", "infile", ".", "read", "(", ")", "print", "(", "'Reading input from stdin'", ",", "file", "=", "sys", ".", "stderr", ")", "return", "sys", ".", "stdin", ".", "read", "(", ")" ]
[ 171, 4 ]
[ 180, 31 ]
python
en
['en', 'en', 'en']
True
CryptoOperation.write_outfile
(self, outdata, outname)
Write the output file
Write the output file
def write_outfile(self, outdata, outname): """Write the output file""" if outname: print('Writing output to %s' % outname, file=sys.stderr) with open(outname, 'wb') as outfile: outfile.write(outdata) else: print('Writing output to stdout', file=sys.stderr) rsa._compat.write_to_stdout(outdata)
[ "def", "write_outfile", "(", "self", ",", "outdata", ",", "outname", ")", ":", "if", "outname", ":", "print", "(", "'Writing output to %s'", "%", "outname", ",", "file", "=", "sys", ".", "stderr", ")", "with", "open", "(", "outname", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "outdata", ")", "else", ":", "print", "(", "'Writing output to stdout'", ",", "file", "=", "sys", ".", "stderr", ")", "rsa", ".", "_compat", ".", "write_to_stdout", "(", "outdata", ")" ]
[ 182, 4 ]
[ 191, 48 ]
python
en
['en', 'sm', 'en']
True
EncryptOperation.perform_operation
(self, indata, pub_key, cli_args=None)
Encrypts files.
Encrypts files.
def perform_operation(self, indata, pub_key, cli_args=None): """Encrypts files.""" return rsa.encrypt(indata, pub_key)
[ "def", "perform_operation", "(", "self", ",", "indata", ",", "pub_key", ",", "cli_args", "=", "None", ")", ":", "return", "rsa", ".", "encrypt", "(", "indata", ",", "pub_key", ")" ]
[ 204, 4 ]
[ 207, 43 ]
python
en
['en', 'ht', 'en']
False
DecryptOperation.perform_operation
(self, indata, priv_key, cli_args=None)
Decrypts files.
Decrypts files.
def perform_operation(self, indata, priv_key, cli_args=None): """Decrypts files.""" return rsa.decrypt(indata, priv_key)
[ "def", "perform_operation", "(", "self", ",", "indata", ",", "priv_key", ",", "cli_args", "=", "None", ")", ":", "return", "rsa", ".", "decrypt", "(", "indata", ",", "priv_key", ")" ]
[ 221, 4 ]
[ 224, 44 ]
python
en
['en', 'lv', 'en']
False
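Stripped of the I/O scaffolding, the two operations above reduce to rsa.encrypt and rsa.decrypt. A round-trip sketch (the 512-bit key and message are illustrative; PKCS#1 v1.5 encryption only fits messages a few dozen bytes under the modulus size):

import rsa

(pub_key, priv_key) = rsa.newkeys(512)
message = b'hello'

crypto = rsa.encrypt(message, pub_key)           # what EncryptOperation returns
assert rsa.decrypt(crypto, priv_key) == message  # what DecryptOperation returns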
SignOperation.perform_operation
(self, indata, priv_key, cli_args)
Signs files.
Signs files.
def perform_operation(self, indata, priv_key, cli_args): """Signs files.""" hash_method = cli_args[1] if hash_method not in HASH_METHODS: raise SystemExit('Invalid hash method, choose one of %s' % ', '.join(HASH_METHODS)) return rsa.sign(indata, priv_key, hash_method)
[ "def", "perform_operation", "(", "self", ",", "indata", ",", "priv_key", ",", "cli_args", ")", ":", "hash_method", "=", "cli_args", "[", "1", "]", "if", "hash_method", "not", "in", "HASH_METHODS", ":", "raise", "SystemExit", "(", "'Invalid hash method, choose one of %s'", "%", "', '", ".", "join", "(", "HASH_METHODS", ")", ")", "return", "rsa", ".", "sign", "(", "indata", ",", "priv_key", ",", "hash_method", ")" ]
[ 243, 4 ]
[ 251, 54 ]
python
en
['en', 'en', 'en']
False
VerifyOperation.perform_operation
(self, indata, pub_key, cli_args)
Verifies files.
Verifies files.
def perform_operation(self, indata, pub_key, cli_args): """Verifies files.""" signature_file = cli_args[1] with open(signature_file, 'rb') as sigfile: signature = sigfile.read() try: rsa.verify(indata, signature, pub_key) except rsa.VerificationError: raise SystemExit('Verification failed.') print('Verification OK', file=sys.stderr)
[ "def", "perform_operation", "(", "self", ",", "indata", ",", "pub_key", ",", "cli_args", ")", ":", "signature_file", "=", "cli_args", "[", "1", "]", "with", "open", "(", "signature_file", ",", "'rb'", ")", "as", "sigfile", ":", "signature", "=", "sigfile", ".", "read", "(", ")", "try", ":", "rsa", ".", "verify", "(", "indata", ",", "signature", ",", "pub_key", ")", "except", "rsa", ".", "VerificationError", ":", "raise", "SystemExit", "(", "'Verification failed.'", ")", "print", "(", "'Verification OK'", ",", "file", "=", "sys", ".", "stderr", ")" ]
[ 268, 4 ]
[ 281, 49 ]
python
en
['en', 'lv', 'en']
False
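Likewise, signing and verification reduce to rsa.sign and rsa.verify, with the hash method restricted to HASH_METHODS, which is why SignOperation validates cli_args[1]. A sketch (key size and hash choice are illustrative):

import rsa

(pub_key, priv_key) = rsa.newkeys(512)
message = b'hello'

signature = rsa.sign(message, priv_key, 'SHA-256')
rsa.verify(message, signature, pub_key)  # raises rsa.VerificationError on mismatch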
average_losses
(all_losses)
Average the losses into one dict of losses. Args: all_losses: List of dictionary of losses. Returns: combined: A dictionary with same keys as individual dicts, with all losses combined.
Average the losses into one dict of losses. Args: all_losses: List of dictionary of losses. Returns: combined: A dictionary with same keys as individual dicts, with all losses combined.
def average_losses(all_losses):
    """Average the losses into one dict of losses.
    Args:
        all_losses: List of dictionary of losses.
    Returns:
        combined: A dictionary with same keys as individual dicts, with
            all losses combined.
    """
    if len(all_losses) == 0:
        return {}
    combined = {}
    for key, val in all_losses[0].items():
        if not isinstance(val, torch.Tensor):
            # If it's None or similar, e.g. when some loss was not active
            combined[key] = val
        else:
            # Average all the values
            stkd = torch.stack([el[key] for el in all_losses])
            # Zero out the negative entries, since undefined losses are
            # set to -1 (where not enough GT is available, etc). Note the
            # mean still divides by the full count, including the zeroed
            # entries.
            combined[key] = torch.mean(stkd * (stkd >= 0), dim=0)
    return combined
[ "def", "average_losses", "(", "all_losses", ")", ":", "if", "len", "(", "all_losses", ")", "==", "0", ":", "return", "{", "}", "combined", "=", "{", "}", "for", "key", ",", "val", "in", "all_losses", "[", "0", "]", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "val", ",", "torch", ".", "Tensor", ")", ":", "# If it's none or sth.. eg some loss was not active", "combined", "[", "key", "]", "=", "val", "else", ":", "# Average all the values", "stkd", "=", "torch", ".", "stack", "(", "[", "el", "[", "key", "]", "for", "el", "in", "all_losses", "]", ")", "# Average the losses that are positive, since I set undefined", "# losses to -1 (where not enough GT is available, etc)", "combined", "[", "key", "]", "=", "torch", ".", "mean", "(", "stkd", "*", "(", "stkd", ">=", "0", ")", ",", "dim", "=", "0", ")", "return", "combined" ]
[ 796, 0 ]
[ 817, 19 ]
python
en
['en', 'en', 'en']
True
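A tiny worked example of the masking above, assuming average_losses and torch are in scope (the numbers are made up): negative entries are zeroed before the mean, but the denominator still counts them, so a loss that is undefined in some batches is scaled down accordingly.

import torch

all_losses = [{'cse': torch.tensor(2.0)},
              {'cse': torch.tensor(-1.0)},  # undefined in this batch
              {'cse': torch.tensor(4.0)}]
combined = average_losses(all_losses)
print(combined['cse'])  # tensor(2.) == (2.0 + 0.0 + 4.0) / 3, not (2 + 4) / 2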
combine_obj_pixels
(obj_pix, obj_dim)
Combine obj-split pixels into a single image. Args: obj_pix: B, ..., Nobj, ..., C, H, W obj_dim: The dimension to reduce over -- which corresponds to objs Returns B, ..., ..., C, H, W
Combine obj-split pixels into a single image. Args: obj_pix: B, ..., Nobj, ..., C, H, W obj_dim: The dimension to reduce over -- which corresponds to objs Returns B, ..., ..., C, H, W
def combine_obj_pixels(obj_pix, obj_dim): """Combine obj-split pixels into a single image. Args: obj_pix: B, ..., Nobj, ..., C, H, W obj_dim: The dimension to reduce over -- which corresponds to objs Returns B, ..., ..., C, H, W """ if obj_pix is None: return None return torch.max(obj_pix, dim=obj_dim)[0]
[ "def", "combine_obj_pixels", "(", "obj_pix", ",", "obj_dim", ")", ":", "if", "obj_pix", "is", "None", ":", "return", "None", "return", "torch", ".", "max", "(", "obj_pix", ",", "dim", "=", "obj_dim", ")", "[", "0", "]" ]
[ 1146, 0 ]
[ 1156, 45 ]
python
en
['en', 'fr', 'en']
True
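A quick sketch of the max-compositing above, assuming combine_obj_pixels is in scope and using a made-up tensor where two objects occupy disjoint pixels:

import torch

obj_pix = torch.zeros(1, 2, 1, 4, 4)  # B, Nobj, C, H, W
obj_pix[0, 0, 0, :2, :] = 1.0         # object 0 in the top half
obj_pix[0, 1, 0, 2:, :] = 1.0         # object 1 in the bottom half
merged = combine_obj_pixels(obj_pix, obj_dim=1)
print(merged.shape)  # torch.Size([1, 1, 4, 4]), with both objects visible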
DynConcat.forward
(self, features, pixels)
This dyn model does not use pixels, so will just return the last history frame Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pixels: (B, Nobj, C, H, W) addl_losses: {}
This dyn model does not use pixels, so will just return the last history frame Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pixels: (B, Nobj, C, H, W) addl_losses: {}
def forward(self, features, pixels):
    """
    This dyn model does not use pixels, so will just return the last
    history frame
    Args:
        features: (B, T, Nobj, D, H', W')
        pixels: (B, T, Nobj, C, H, W)
    Returns:
        pred: (B, Nobj, D, H', W')
        pixels: (B, Nobj, C, H, W)
        addl_losses: {}
    """
    cat_feats = torch.reshape(features, (features.shape[0], -1) +
                              features.shape[-2:])
    future_feat = torch.reshape(self.dyn(cat_feats),
                                features.shape[:1] + features.shape[2:])
    # Skip connection: add the last frame's features, so the model only
    # has to predict the residual change instead of regenerating the scene
    pred = features[:, -1, ...] + future_feat
    return pred, pixels[:, -1, ...], {}
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "future_feat", "=", "torch", ".", "reshape", "(", "self", ".", "dyn", "(", "cat_feats", ")", ",", "features", ".", "shape", "[", ":", "1", "]", "+", "features", ".", "shape", "[", "2", ":", "]", ")", "# Skip connection, add the last frames features, so it stops", "# deleting things", "pred", "=", "features", "[", ":", ",", "-", "1", ",", "...", "]", "+", "future_feat", "return", "pred", ",", "pixels", "[", ":", ",", "-", "1", ",", "...", "]", ",", "{", "}" ]
[ 266, 4 ]
[ 285, 43 ]
python
en
['en', 'error', 'th']
False
MultiSTN.__init__
(self, input_dim, num_tx, dof='affine', inp_type='pix', affine_tx_mode='bilinear', kernel_size=3, stochastic=False)
Args: input_dim (int): Dimension of the features used to predict the STN parameters num_tx (int): Number of transformations to predict, will apply to the tensor, split along some dimension dof (str): Controls how generic an affine matrix to predict. If 'affine', will predict a generic 3x2 matrix If 'rot-trans-only', it will only predict theta, x, y, and use those to construct the affine matrix. So it will force the matrix not to apply any shear, scale, etc. Similarly for 'rot-only' and 'trans-only' inp_type (str): Defines the type of the input. 'pix' is the default, to directly transform the grid and move the pixels. 'pt' is the PointNet style format, where the first 2 dimensions of each split of the channels must correspond to the X, Y location, and the transforms will just modify those dimensions, and not touch the pixel values at all. affine_tx_mode (str): The mode to use for grid_sample kernel_size (int) stochastic (bool): If true, predict a distribution over the affine matrix, instead of deterministically.
Args: input_dim (int): Dimension of the features used to predict the STN parameters num_tx (int): Number of transformations to predict, will apply to the tensor, split along some dimension dof (str): Controls how generic an affine matrix to predict. If 'affine', will predict a generic 3x2 matrix If 'rot-trans-only', it will only predict theta, x, y, and use those to construct the affine matrix. So it will force the matrix not to apply any shear, scale, etc. Similarly for 'rot-only' and 'trans-only' inp_type (str): Defines the type of the input. 'pix' is the default, to directly transform the grid and move the pixels. 'pt' is the PointNet style format, where the first 2 dimensions of each split of the channels must correspond to the X, Y location, and the transforms will just modify those dimensions, and not touch the pixel values at all. affine_tx_mode (str): The mode to use for grid_sample kernel_size (int) stochastic (bool): If true, predict a distribution over the affine matrix, instead of deterministically.
def __init__(self,
             input_dim,
             num_tx,
             dof='affine',
             inp_type='pix',
             affine_tx_mode='bilinear',
             kernel_size=3,
             stochastic=False):
    """
    Args:
        input_dim (int): Dimension of the features used to predict the STN
            parameters
        num_tx (int): Number of transformations to predict, will apply to
            the tensor, split along some dimension
        dof (str): Controls how generic an affine matrix to predict.
            If 'affine', will predict a generic 3x2 matrix
            If 'rot-trans-only', it will only predict theta, x, y, and use
            those to construct the affine matrix. So it will force the
            matrix not to apply any shear, scale, etc.
            Similarly for 'rot-only' and 'trans-only'
        inp_type (str): Defines the type of the input. 'pix' is the
            default, to directly transform the grid and move the pixels.
            'pt' is the PointNet style format, where the first 2 dimensions
            of each split of the channels must correspond to the X, Y
            location, and the transforms will just modify those dimensions,
            and not touch the pixel values at all.
        affine_tx_mode (str): The mode to use for grid_sample
        kernel_size (int)
        stochastic (bool): If true, predict a distribution over the affine
            matrix, instead of deterministically.
    """
    super().__init__()
    self.num_tx = num_tx
    self.dof = dof
    self.inp_type = inp_type
    self.affine_tx_mode = affine_tx_mode
    # Spatial transformer localization-network
    self.localization = nn.Sequential(
        nn.Conv2d(input_dim,
                  8 * num_tx,
                  kernel_size=kernel_size,
                  padding=kernel_size // 2), nn.ReLU(True),
        nn.Conv2d(8 * num_tx,
                  10 * num_tx,
                  kernel_size=kernel_size,
                  padding=kernel_size // 2), nn.ReLU(True))
    # Regressor for the affine matrices
    # Predicting 3x2 parameters that should be enough for any generic
    # affine transformation, though will subselect in case only few
    # parameters are needed
    self.stochastic = stochastic
    if self.stochastic:
        self.fc_loc_mean = nn.Linear(10 * num_tx, 10 * num_tx)
        self.fc_loc_logvar = nn.Linear(10 * num_tx, 10 * num_tx)
    self.fc_loc = nn.Sequential(nn.Linear(10 * num_tx, 32 * num_tx),
                                nn.ReLU(True),
                                nn.Linear(32 * num_tx, num_tx * 3 * 2))
    # Initialize the weights/bias with identity transformation
    self.fc_loc[2].weight.data.zero_()
    if self.dof != 'affine':
        # The parameters would be used for rot/trans
        self.fc_loc[2].bias.data.zero_()  # 0 rot/translation by default
    else:
        self.fc_loc[2].bias.data.copy_(
            torch.from_numpy(
                np.array([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float64)))
[ "def", "__init__", "(", "self", ",", "input_dim", ",", "num_tx", ",", "dof", "=", "'affine'", ",", "inp_type", "=", "'pix'", ",", "affine_tx_mode", "=", "'bilinear'", ",", "kernel_size", "=", "3", ",", "stochastic", "=", "False", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "num_tx", "=", "num_tx", "self", ".", "dof", "=", "dof", "self", ".", "inp_type", "=", "inp_type", "self", ".", "affine_tx_mode", "=", "affine_tx_mode", "# Spatial transformer localization-network", "self", ".", "localization", "=", "nn", ".", "Sequential", "(", "nn", ".", "Conv2d", "(", "input_dim", ",", "8", "*", "num_tx", ",", "kernel_size", "=", "kernel_size", ",", "padding", "=", "kernel_size", "//", "2", ")", ",", "nn", ".", "ReLU", "(", "True", ")", ",", "nn", ".", "Conv2d", "(", "8", "*", "num_tx", ",", "10", "*", "num_tx", ",", "kernel_size", "=", "kernel_size", ",", "padding", "=", "kernel_size", "//", "2", ")", ",", "nn", ".", "ReLU", "(", "True", ")", ")", "# Regressor for the affine matrices", "# Predicting 3x2 parameters that should be enough for any generic", "# affine transformation, though will subselect in case only few", "# parameters are needed", "self", ".", "stochastic", "=", "stochastic", "if", "self", ".", "stochastic", ":", "self", ".", "fc_loc_mean", "=", "nn", ".", "Linear", "(", "10", "*", "num_tx", ",", "10", "*", "num_tx", ")", "self", ".", "fc_loc_logvar", "=", "nn", ".", "Linear", "(", "10", "*", "num_tx", ",", "10", "*", "num_tx", ")", "self", ".", "fc_loc", "=", "nn", ".", "Sequential", "(", "nn", ".", "Linear", "(", "10", "*", "num_tx", ",", "32", "*", "num_tx", ")", ",", "nn", ".", "ReLU", "(", "True", ")", ",", "nn", ".", "Linear", "(", "32", "*", "num_tx", ",", "num_tx", "*", "3", "*", "2", ")", ")", "# Initialize the weights/bias with identity transformation", "self", ".", "fc_loc", "[", "2", "]", ".", "weight", ".", "data", ".", "zero_", "(", ")", "if", "self", ".", "dof", "!=", "'affine'", ":", "# The paramters would be used for rot/trans", "self", ".", "fc_loc", "[", "2", "]", ".", "bias", ".", "data", ".", "zero_", "(", ")", "# 0 rot/translation by default", "else", ":", "self", ".", "fc_loc", "[", "2", "]", ".", "bias", ".", "data", ".", "copy_", "(", "torch", ".", "from_numpy", "(", "np", ".", "array", "(", "[", "1", ",", "0", ",", "0", ",", "0", ",", "1", ",", "0", "]", "*", "num_tx", ",", "dtype", "=", "np", ".", "float", ")", ")", ")" ]
[ 291, 4 ]
[ 357, 75 ]
python
en
['en', 'error', 'th']
False
MultiSTN.transform_pix
(self, feat, theta, mode='bilinear')
Transform the features using theta.
Transform the features using theta.
def transform_pix(self, feat, theta, mode='bilinear'): """Transform the features using theta.""" grid = nn.functional.affine_grid(theta, feat.size(), align_corners=True) return nn.functional.grid_sample(feat, grid, mode=mode, align_corners=True)
[ "def", "transform_pix", "(", "self", ",", "feat", ",", "theta", ",", "mode", "=", "'bilinear'", ")", ":", "grid", "=", "nn", ".", "functional", ".", "affine_grid", "(", "theta", ",", "feat", ".", "size", "(", ")", ",", "align_corners", "=", "True", ")", "return", "nn", ".", "functional", ".", "grid_sample", "(", "feat", ",", "grid", ",", "mode", "=", "mode", ",", "align_corners", "=", "True", ")" ]
[ 359, 4 ]
[ 367, 60 ]
python
en
['en', 'en', 'en']
True
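transform_pix is a thin wrapper over the standard spatial-transformer pipeline. A sketch showing that the identity 2x3 matrix (the bias the 'affine' branch is initialized to) leaves the features unchanged:

import torch
import torch.nn as nn

feat = torch.rand(2, 8, 16, 16)                     # B, C, H, W
theta = torch.tensor([[1., 0., 0.],
                      [0., 1., 0.]])                # identity 2x3 affine
theta = theta.repeat(feat.shape[0], 1, 1)           # one matrix per batch item

grid = nn.functional.affine_grid(theta, feat.size(), align_corners=True)
out = nn.functional.grid_sample(feat, grid, mode='bilinear',
                                align_corners=True)
assert torch.allclose(out, feat, atol=1e-5)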
MultiSTN.transform_pt
(self, feat, theta)
Transform pt-net style feature using theta. Here, it assumes the first 2 dimensions of the feature are loc. Args: feat (B, C, H, W), C >= 2 Returns: tx feat (B, C, H, W)
Transform pt-net style feature using theta. Here, it assumes the first 2 dimensions of the feature are loc. Args: feat (B, C, H, W), C >= 2 Returns: tx feat (B, C, H, W)
def transform_pt(self, feat, theta): """Transform pt-net style feature using theta. Here, it assumes the first 2 dimensions of the feature are loc. Args: feat (B, C, H, W), C >= 2 Returns: tx feat (B, C, H, W) """ assert feat.shape[1] >= 2 feat_pos = feat[:, :2, ...] feat_pos_ones = torch.ones_like(feat[:, :1, ...]) feat_pos_aug = torch.cat([feat_pos, feat_pos_ones], dim=1) feat_pos_aug = feat_pos_aug.view(feat.shape[:1] + (3, -1)) feat_pos_aug_end = feat_pos_aug.transpose(1, 2).unsqueeze(-1) txed = torch.matmul(theta.unsqueeze(1), feat_pos_aug_end) tx_feat_pos = txed.squeeze(-1).transpose(1, 2).view(feat_pos.shape) # Attach the features to it tx_feat = torch.cat([tx_feat_pos, feat[:, 2:, ...]], dim=1) return tx_feat
[ "def", "transform_pt", "(", "self", ",", "feat", ",", "theta", ")", ":", "assert", "feat", ".", "shape", "[", "1", "]", ">=", "2", "feat_pos", "=", "feat", "[", ":", ",", ":", "2", ",", "...", "]", "feat_pos_ones", "=", "torch", ".", "ones_like", "(", "feat", "[", ":", ",", ":", "1", ",", "...", "]", ")", "feat_pos_aug", "=", "torch", ".", "cat", "(", "[", "feat_pos", ",", "feat_pos_ones", "]", ",", "dim", "=", "1", ")", "feat_pos_aug", "=", "feat_pos_aug", ".", "view", "(", "feat", ".", "shape", "[", ":", "1", "]", "+", "(", "3", ",", "-", "1", ")", ")", "feat_pos_aug_end", "=", "feat_pos_aug", ".", "transpose", "(", "1", ",", "2", ")", ".", "unsqueeze", "(", "-", "1", ")", "txed", "=", "torch", ".", "matmul", "(", "theta", ".", "unsqueeze", "(", "1", ")", ",", "feat_pos_aug_end", ")", "tx_feat_pos", "=", "txed", ".", "squeeze", "(", "-", "1", ")", ".", "transpose", "(", "1", ",", "2", ")", ".", "view", "(", "feat_pos", ".", "shape", ")", "# Attach the features to it", "tx_feat", "=", "torch", ".", "cat", "(", "[", "tx_feat_pos", ",", "feat", "[", ":", ",", "2", ":", ",", "...", "]", "]", ",", "dim", "=", "1", ")", "return", "tx_feat" ]
[ 369, 4 ]
[ 387, 22 ]
python
en
['en', 'en', 'en']
True
MultiSTN.forward
(self, feat_for_tx, feat_to_tx, split_dim=1)
Args: feat_for_tx (B, D, H, W): The features to use to compute the transformation feat_to_tx (B, D', H, W): Features to apply the tx onto split_dim (int): Dimension to split on
Args: feat_for_tx (B, D, H, W): The features to use to compute the transformation feat_to_tx (B, D', H, W): Features to apply the tx onto split_dim (int): Dimension to split on
def forward(self, feat_for_tx, feat_to_tx, split_dim=1): """ Args: feat_for_tx (B, D, H, W): The features to use to compute the transformation feat_to_tx (B, D', H, W): Features to apply the tx onto split_dim (int): Dimension to split on """ feat_hist_embed = self.localization(feat_for_tx) # Average out the spatial dimension feat_hist_embed = torch.mean(feat_hist_embed, dim=[-2, -1]) addl_losses = {} if self.stochastic: pred, kl_loss = self._compute_loc_stochastic(feat_hist_embed) addl_losses['kl'] = kl_loss else: pred = self.fc_loc(feat_hist_embed) if self.dof != 'affine': pred = pred.view(-1, self.num_tx, 3 * 2) # Say the first number is actual angle, and next 2 are x, y angle = pred[..., :1] pos_x = pred[..., 1:2] pos_y = pred[..., 2:3] if self.dof == 'rot-only': pos_x = torch.zeros_like(pos_x) pos_y = torch.zeros_like(pos_y) elif self.dof == 'trans-only': angle = torch.zeros_like(angle) else: assert self.dof == 'rot-trans-only', 'The only other option' cos_angle = torch.cos(angle) sin_angle = torch.sin(angle) # create the 2x3 matrix out of this theta = torch.cat( [cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y], dim=-1) theta = theta.view(theta.shape[:-1] + (2, 3)) elif self.dof == 'affine': theta = pred.view(-1, self.num_tx, 2, 3) else: raise NotImplementedError('Unknown {}'.format(self.dof)) # Split the channels of feat_to_tx into num_tx groups, and apply the # transformations to each of those groups assert feat_to_tx.shape[split_dim] % self.num_tx == 0, ( 'Must be divisible to ensure equal sized chunks') # Chunk it feat_to_tx_parts = torch.chunk(feat_to_tx, self.num_tx, split_dim) # Apply the corresponding transformation to each part if self.inp_type == 'pix': tx_fn = partial(self.transform_pix, mode=self.affine_tx_mode) elif self.inp_type == 'pt': tx_fn = self.transform_pt else: raise NotImplementedError('Unknown type {}'.format(self.inp_type)) feat_to_tx_parts_txed = [ tx_fn(el, theta[:, i, ...]) for i, el in enumerate(feat_to_tx_parts) ] return torch.cat(feat_to_tx_parts_txed, dim=split_dim), addl_losses
[ "def", "forward", "(", "self", ",", "feat_for_tx", ",", "feat_to_tx", ",", "split_dim", "=", "1", ")", ":", "feat_hist_embed", "=", "self", ".", "localization", "(", "feat_for_tx", ")", "# Average out the spatial dimension", "feat_hist_embed", "=", "torch", ".", "mean", "(", "feat_hist_embed", ",", "dim", "=", "[", "-", "2", ",", "-", "1", "]", ")", "addl_losses", "=", "{", "}", "if", "self", ".", "stochastic", ":", "pred", ",", "kl_loss", "=", "self", ".", "_compute_loc_stochastic", "(", "feat_hist_embed", ")", "addl_losses", "[", "'kl'", "]", "=", "kl_loss", "else", ":", "pred", "=", "self", ".", "fc_loc", "(", "feat_hist_embed", ")", "if", "self", ".", "dof", "!=", "'affine'", ":", "pred", "=", "pred", ".", "view", "(", "-", "1", ",", "self", ".", "num_tx", ",", "3", "*", "2", ")", "# Say the first number is actual angle, and next 2 are x, y", "angle", "=", "pred", "[", "...", ",", ":", "1", "]", "pos_x", "=", "pred", "[", "...", ",", "1", ":", "2", "]", "pos_y", "=", "pred", "[", "...", ",", "2", ":", "3", "]", "if", "self", ".", "dof", "==", "'rot-only'", ":", "pos_x", "=", "torch", ".", "zeros_like", "(", "pos_x", ")", "pos_y", "=", "torch", ".", "zeros_like", "(", "pos_y", ")", "elif", "self", ".", "dof", "==", "'trans-only'", ":", "angle", "=", "torch", ".", "zeros_like", "(", "angle", ")", "else", ":", "assert", "self", ".", "dof", "==", "'rot-trans-only'", ",", "'The only other option'", "cos_angle", "=", "torch", ".", "cos", "(", "angle", ")", "sin_angle", "=", "torch", ".", "sin", "(", "angle", ")", "# create the 2x3 matrix out of this", "theta", "=", "torch", ".", "cat", "(", "[", "cos_angle", ",", "sin_angle", ",", "pos_x", ",", "-", "sin_angle", ",", "cos_angle", ",", "pos_y", "]", ",", "dim", "=", "-", "1", ")", "theta", "=", "theta", ".", "view", "(", "theta", ".", "shape", "[", ":", "-", "1", "]", "+", "(", "2", ",", "3", ")", ")", "elif", "self", ".", "dof", "==", "'affine'", ":", "theta", "=", "pred", ".", "view", "(", "-", "1", ",", "self", ".", "num_tx", ",", "2", ",", "3", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unknown {}'", ".", "format", "(", "self", ".", "dof", ")", ")", "# Split the channels of feat_to_tx into num_tx groups, and apply the", "# transformations to each of those groups", "assert", "feat_to_tx", ".", "shape", "[", "split_dim", "]", "%", "self", ".", "num_tx", "==", "0", ",", "(", "'Must be divisible to ensure equal sized chunks'", ")", "# Chunk it", "feat_to_tx_parts", "=", "torch", ".", "chunk", "(", "feat_to_tx", ",", "self", ".", "num_tx", ",", "split_dim", ")", "# Apply the corresponding transformation to each part", "if", "self", ".", "inp_type", "==", "'pix'", ":", "tx_fn", "=", "partial", "(", "self", ".", "transform_pix", ",", "mode", "=", "self", ".", "affine_tx_mode", ")", "elif", "self", ".", "inp_type", "==", "'pt'", ":", "tx_fn", "=", "self", ".", "transform_pt", "else", ":", "raise", "NotImplementedError", "(", "'Unknown type {}'", ".", "format", "(", "self", ".", "inp_type", ")", ")", "feat_to_tx_parts_txed", "=", "[", "tx_fn", "(", "el", ",", "theta", "[", ":", ",", "i", ",", "...", "]", ")", "for", "i", ",", "el", "in", "enumerate", "(", "feat_to_tx_parts", ")", "]", "return", "torch", ".", "cat", "(", "feat_to_tx_parts_txed", ",", "dim", "=", "split_dim", ")", ",", "addl_losses" ]
[ 399, 4 ]
[ 460, 75 ]
python
en
['en', 'error', 'th']
False
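In the 'rot-trans-only' branch above, the predicted (angle, x, y) triple is packed into a 2x3 affine matrix following the code's convention:

\theta_{tx} = \begin{pmatrix} \cos\alpha & \sin\alpha & t_x \\ -\sin\alpha & \cos\alpha & t_y \end{pmatrix}

so 'rot-only' zeroes t_x and t_y, and 'trans-only' zeroes \alpha, recovering pure rotations and pure translations as special cases of the same parameterization.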
DynSTN.forward
(self, features, pixels)
This dyn model does not use pixels, so will just return the last history frame Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pix addl_losses
This dyn model does not use pixels, so will just return the last history frame Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pix addl_losses
def forward(self, features, pixels):
    """
    This dyn model does not use pixels, so will just return the last
    history frame
    Args:
        features: (B, T, Nobj, D, H', W')
        pixels: (B, T, Nobj, C, H, W)
    Returns:
        pred: (B, Nobj, D, H', W')
        pix
        addl_losses
    """
    cat_feats = torch.reshape(features, (features.shape[0], -1) +
                              features.shape[-2:])
    # For > 1 objs, just flatten Nobj and D channels, and the STN class
    # will split it back to do the transformations
    feat_obj_flat = torch.flatten(features, 2, 3)
    new_feat, addl_losses = self.dyn(cat_feats, feat_obj_flat[:, -1, ...])
    future_feat = torch.reshape(new_feat,
                                features.shape[:1] + features.shape[2:])
    return future_feat, pixels[:, -1, ...], addl_losses
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "# For > 1 objs, just flatten Nobj and D channels, and the STN class", "# will split it back to do the transformations", "feat_obj_flat", "=", "torch", ".", "flatten", "(", "features", ",", "2", ",", "3", ")", "new_feat", ",", "addl_loses", "=", "self", ".", "dyn", "(", "cat_feats", ",", "feat_obj_flat", "[", ":", ",", "-", "1", ",", "...", "]", ")", "future_feat", "=", "torch", ".", "reshape", "(", "new_feat", ",", "features", ".", "shape", "[", ":", "1", "]", "+", "features", ".", "shape", "[", "2", ":", "]", ")", "return", "future_feat", ",", "pixels", "[", ":", ",", "-", "1", ",", "...", "]", ",", "addl_loses" ]
[ 472, 4 ]
[ 492, 58 ]
python
en
['en', 'error', 'th']
False
DynSTNPixels_DEPRECATED.forward
(self, features, pixels)
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
def forward(self, features, pixels): """ Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W') """ raise NotImplementedError('Deal with objectified pixel input. ' 'Also deal with addl losses. ') cat_feats = torch.reshape(features, (features.shape[0], -1) + features.shape[-2:]) assert features.shape[2] == 1, 'Not implemented yet for >1 objs' # Repmat the image channels num_tx times, so STN can predict those many # transformations pixels_tiled = pixels.repeat(1, 1, self.num_tx, 1, 1) future_pixels_tiled = self.dyn(cat_feats, pixels_tiled[:, -1, ...]) # Compute attention maps for compositing attention_maps = self.attention(cat_feats) # Do a weighted sum of the channels using the attention maps attention_maps_split = torch.chunk(attention_maps, self.num_tx, 1) future_pixels_split = torch.chunk(future_pixels_tiled, self.num_tx, 1) weighted = [ att * pix for att, pix in zip(attention_maps_split, future_pixels_split) ] future_pixels = torch.mean(torch.stack(weighted), dim=0) # Since this is a new image being generated, need to pass through the # encoder to get the features for this image future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...] return future_feat, future_pixels
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "raise", "NotImplementedError", "(", "'Deal with objectified pixel input. '", "'Also deal with addl losses. '", ")", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "assert", "features", ".", "shape", "[", "2", "]", "==", "1", ",", "'Not implemented yet for >1 objs'", "# Repmat the image channels num_tx times, so STN can predict those many", "# transformations", "pixels_tiled", "=", "pixels", ".", "repeat", "(", "1", ",", "1", ",", "self", ".", "num_tx", ",", "1", ",", "1", ")", "future_pixels_tiled", "=", "self", ".", "dyn", "(", "cat_feats", ",", "pixels_tiled", "[", ":", ",", "-", "1", ",", "...", "]", ")", "# Compute attention maps for compositing", "attention_maps", "=", "self", ".", "attention", "(", "cat_feats", ")", "# Do a weighted sum of the channels using the attention maps", "attention_maps_split", "=", "torch", ".", "chunk", "(", "attention_maps", ",", "self", ".", "num_tx", ",", "1", ")", "future_pixels_split", "=", "torch", ".", "chunk", "(", "future_pixels_tiled", ",", "self", ".", "num_tx", ",", "1", ")", "weighted", "=", "[", "att", "*", "pix", "for", "att", ",", "pix", "in", "zip", "(", "attention_maps_split", ",", "future_pixels_split", ")", "]", "future_pixels", "=", "torch", ".", "mean", "(", "torch", ".", "stack", "(", "weighted", ")", ",", "dim", "=", "0", ")", "# Since this is a new image being generated, need to pass through the", "# encoder to get the features for this image", "future_feat", "=", "self", ".", "enc", "(", "future_pixels", ".", "unsqueeze", "(", "1", ")", ")", "[", ":", ",", "0", ",", "...", "]", "return", "future_feat", ",", "future_pixels" ]
[ 510, 4 ]
[ 540, 41 ]
python
en
['en', 'error', 'th']
False
DynSTNPixelChannels_DEPRECATED.forward
(self, features, pixels)
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
def forward(self, features, pixels): """ Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W') """ raise NotImplementedError('Deal with objectified pixel input. ' 'Also deal with addl losses. ') assert (pixels.shape[2] == self.num_tx or pixels.shape[2] == self.num_tx * 3), 'In pix or pt mode so far' cat_feats = torch.reshape(features, (features.shape[0], -1) + features.shape[-2:]) assert features.shape[2] == 1, 'Not implemented yet for >1 objs' future_pixels = self.dyn(cat_feats, pixels[:, -1, ...]) # Since this is a new image being generated, need to pass through the # encoder to get the features for this image future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...] return future_feat, future_pixels
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "raise", "NotImplementedError", "(", "'Deal with objectified pixel input. '", "'Also deal with addl losses. '", ")", "assert", "(", "pixels", ".", "shape", "[", "2", "]", "==", "self", ".", "num_tx", "or", "pixels", ".", "shape", "[", "2", "]", "==", "self", ".", "num_tx", "*", "3", ")", ",", "'In pix or pt mode so far'", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "assert", "features", ".", "shape", "[", "2", "]", "==", "1", ",", "'Not implemented yet for >1 objs'", "future_pixels", "=", "self", ".", "dyn", "(", "cat_feats", ",", "pixels", "[", ":", ",", "-", "1", ",", "...", "]", ")", "# Since this is a new image being generated, need to pass through the", "# encoder to get the features for this image", "future_feat", "=", "self", ".", "enc", "(", "future_pixels", ".", "unsqueeze", "(", "1", ")", ")", "[", ":", ",", "0", ",", "...", "]", "return", "future_feat", ",", "future_pixels" ]
[ 553, 4 ]
[ 572, 41 ]
python
en
['en', 'error', 'th']
False
DynSTNPixelChannelsGenBg_DEPRECATED.forward
(self, features, pixels)
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W')
def forward(self, features, pixels): """ Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, C, H, W) Returns: pred: (B, Nobj, D, H', W') """ raise NotImplementedError('Deal with objectified pixel input. ' 'Also deal with addl losses. ') assert (pixels.shape[2] - 1) == self.num_tx cat_feats = torch.reshape(features, (features.shape[0], -1) + features.shape[-2:]) assert features.shape[2] == 1, 'Not implemented yet for >1 objs' future_pixels_obj = self.dyn(cat_feats, pixels[:, -1, 1:, ...]) future_pixels_bg = self.bg_dec( torch.cat([pixels[:, -1, ...], future_pixels_obj], dim=1)) future_pixels = torch.cat([future_pixels_bg, future_pixels_obj], dim=1) # Since this is a new image being generated, need to pass through the # encoder to get the features for this image future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...] return future_feat, future_pixels
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "raise", "NotImplementedError", "(", "'Deal with objectified pixel input. '", "'Also deal with addl losses. '", ")", "assert", "(", "pixels", ".", "shape", "[", "2", "]", "-", "1", ")", "==", "self", ".", "num_tx", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "assert", "features", ".", "shape", "[", "2", "]", "==", "1", ",", "'Not implemented yet for >1 objs'", "future_pixels_obj", "=", "self", ".", "dyn", "(", "cat_feats", ",", "pixels", "[", ":", ",", "-", "1", ",", "1", ":", ",", "...", "]", ")", "future_pixels_bg", "=", "self", ".", "bg_dec", "(", "torch", ".", "cat", "(", "[", "pixels", "[", ":", ",", "-", "1", ",", "...", "]", ",", "future_pixels_obj", "]", ",", "dim", "=", "1", ")", ")", "future_pixels", "=", "torch", ".", "cat", "(", "[", "future_pixels_bg", ",", "future_pixels_obj", "]", ",", "dim", "=", "1", ")", "# Since this is a new image being generated, need to pass through the", "# encoder to get the features for this image", "future_feat", "=", "self", ".", "enc", "(", "future_pixels", ".", "unsqueeze", "(", "1", ")", ")", "[", ":", ",", "0", ",", "...", "]", "return", "future_feat", ",", "future_pixels" ]
[ 600, 4 ]
[ 621, 41 ]
python
en
['en', 'error', 'th']
False
DynSTNPixelChannelsDetBg.forward
(self, features, pixels)
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pix addl_losses
Args: features: (B, T, Nobj, D, H', W') pixels: (B, T, Nobj, C, H, W) Returns: pred: (B, Nobj, D, H', W') pix addl_losses
def forward(self, features, pixels):
    """
    Args:
        features: (B, T, Nobj, D, H', W')
        pixels: (B, T, Nobj, C, H, W)
    Returns:
        pred: (B, Nobj, D, H', W')
        pix
        addl_losses
    """
    assert pixels.shape[3] >= self.num_tx
    cat_feats = torch.reshape(features, (features.shape[0], -1) +
                              features.shape[-2:])
    pixels_movable = pixels[:, -1, :, self.movable_channels, ...]
    # combine all channels of objects and transform
    pixels_movable_flat = torch.flatten(pixels_movable, 1, 2)
    future_pixels_flat_movable, addl_losses = self.dyn(
        cat_feats, pixels_movable_flat)
    future_pixels_movable = future_pixels_flat_movable.view(
        pixels_movable.shape)
    # Copy most of the channels; clone so the in-place updates below don't
    # write back into the input history tensor through a view
    future_pixels = pixels[:, -1, ...].clone()
    future_pixels[:, :, self.movable_channels, ...] = future_pixels_movable
    # Compute the background deterministically, where all other channels
    # are 0s, it has to be 1. So make channels sum to 1.
    future_pixels_bg = 1.0 - torch.sum(
        future_pixels[:, :, 1:, ...], dim=2, keepdim=True)
    future_pixels[:, :, :1, ...] = future_pixels_bg
    # Since this is a new image being generated, need to pass through the
    # encoder to get the features for this image
    future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
    return future_feat, future_pixels, addl_losses
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "assert", "pixels", ".", "shape", "[", "3", "]", ">=", "self", ".", "num_tx", "cat_feats", "=", "torch", ".", "reshape", "(", "features", ",", "(", "features", ".", "shape", "[", "0", "]", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "pixels_movable", "=", "pixels", "[", ":", ",", "-", "1", ",", ":", ",", "self", ".", "movable_channels", ",", "...", "]", "# combine all channels of objects and transform", "pixels_movable_flat", "=", "torch", ".", "flatten", "(", "pixels_movable", ",", "1", ",", "2", ")", "future_pixels_flat_movable", ",", "addl_losses", "=", "self", ".", "dyn", "(", "cat_feats", ",", "pixels_movable_flat", ")", "future_pixels_movable", "=", "future_pixels_flat_movable", ".", "view", "(", "pixels_movable", ".", "shape", ")", "future_pixels", "=", "pixels", "[", ":", ",", "-", "1", ",", "...", "]", "# Copy most of the channels", "future_pixels", "[", ":", ",", ":", ",", "self", ".", "movable_channels", ",", "...", "]", "=", "future_pixels_movable", "# Compute the background deterministically, where all other channels", "# are 0s, it has to be 1. So make channels sum to 1.", "future_pixels_bg", "=", "1.0", "-", "torch", ".", "sum", "(", "future_pixels", "[", ":", ",", ":", ",", "1", ":", ",", "...", "]", ",", "dim", "=", "2", ",", "keepdims", "=", "True", ")", "future_pixels", "[", ":", ",", ":", ",", ":", "1", ",", "...", "]", "=", "future_pixels_bg", "# Since this is a new image being generated, need to pass through the", "# encoder to get the features for this image", "future_feat", "=", "self", ".", "enc", "(", "future_pixels", ".", "unsqueeze", "(", "1", ")", ")", "[", ":", ",", "0", ",", "...", "]", "return", "future_feat", ",", "future_pixels", ",", "addl_losses" ]
[ 651, 4 ]
[ 681, 54 ]
python
en
['en', 'error', 'th']
False
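A small sketch of the deterministic background above: after the movable channels are transformed, channel 0 is recomputed so each pixel's channels sum to 1 (shapes made up):

import torch

future_pixels = torch.rand(2, 1, 4, 8, 8)  # B, Nobj, C, H, W with C=4
bg = 1.0 - torch.sum(future_pixels[:, :, 1:, ...], dim=2, keepdim=True)
future_pixels[:, :, :1, ...] = bg
assert torch.allclose(future_pixels.sum(dim=2), torch.ones(2, 1, 8, 8))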
BasicDecoder.forward
(self, features, pixels)
Args: features (BxNobjxDxH'xW'): Features to be decoded pixels (BxNobjxCxHxW): Pixels generated by the dynamics model Returns: imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation is done later in the Fwd class)
Args: features (BxNobjxDxH'xW'): Features to be decoded pixels (BxNobjxCxHxW): Pixels generated by the dynamics model Returns: imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation is done later in the Fwd class)
def forward(self, features, pixels):
    """
    Args:
        features (BxNobjxDxH'xW'): Features to be decoded
        pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
    Returns:
        imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation
            is done later in the Fwd class)
    """
    if self.decode_from == 'pixels':
        decode_feature = pixels
    else:
        decode_feature = features
    if not self.backprop_feat_ext:
        # Means train the decoder separately from the rest of the network,
        # don't backprop gradients to the feature extractor
        decode_feature = decode_feature.detach()
    # Sum the features over all the objects, and do one decode.
    # Separate decodes take just way too much time, so need to do it once
    decode_feature = torch.sum(decode_feature, dim=1, keepdim=True)
    features_flatten_obj = torch.flatten(decode_feature, 0, 1)
    images = self.deconv_net(features_flatten_obj)
    # Reshape back into object level
    out = torch.reshape(images,
                        decode_feature.shape[:2] + images.shape[1:])
    return out
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "if", "self", ".", "decode_from", "==", "'pixels'", ":", "decode_feature", "=", "pixels", "else", ":", "decode_feature", "=", "features", "if", "not", "self", ".", "backprop_feat_ext", ":", "# Means train the decoder separately from the rest of the network,", "# don't backprop gradients to the feature extractor", "decode_feature", "=", "decode_feature", ".", "detach", "(", ")", "# Summing the features over all the objects, and doing one decode.", "# Separate decodes takes just way too much time, so need to do it once", "decode_feature", "=", "torch", ".", "sum", "(", "decode_feature", ",", "dim", "=", "1", ",", "keepdims", "=", "True", ")", "features_flatten_obj", "=", "torch", ".", "flatten", "(", "decode_feature", ",", "0", ",", "1", ")", "images", "=", "self", ".", "deconv_net", "(", "features_flatten_obj", ")", "# Reshape back into object level", "out", "=", "torch", ".", "reshape", "(", "images", ",", "decode_feature", ".", "shape", "[", ":", "2", "]", "+", "images", ".", "shape", "[", "1", ":", "]", ")", "return", "out" ]
[ 750, 4 ]
[ 775, 18 ]
python
en
['en', 'error', 'th']
False
TrivialDecoder.forward
(self, features, pixels)
Args: features (BxNobjxDxH'xW'): Features to be decoded pixels (BxNobjxCxHxW): Pixels generated by the dynamics model Returns: imgs (BxNobjxCxHxW): Output frames
Args: features (BxNobjxDxH'xW'): Features to be decoded pixels (BxNobjxCxHxW): Pixels generated by the dynamics model Returns: imgs (BxNobjxCxHxW): Output frames
def forward(self, features, pixels): """ Args: features (BxNobjxDxH'xW'): Features to be decoded pixels (BxNobjxCxHxW): Pixels generated by the dynamics model Returns: imgs (BxNobjxCxHxW): Output frames """ del features # assumes the dynamics model will do all decoding return pixels
[ "def", "forward", "(", "self", ",", "features", ",", "pixels", ")", ":", "del", "features", "# assumes the dynamics model will do all decoding", "return", "pixels" ]
[ 784, 4 ]
[ 793, 21 ]
python
en
['en', 'error', 'th']
False
BasicObjEncoder.forward
(self, feat)
Args: feat: (B, T, Nobj, D, H', W')
Args: feat: (B, T, Nobj, D, H', W')
def forward(self, feat):
    """
    Args:
        feat: (B, T, Nobj, D, H', W')
    """
    if self.encoder:
        feat_flat = torch.flatten(feat, 0, 2)
        obj_embed_flat = self.encoder(feat_flat)
        obj_embed = torch.reshape(
            obj_embed_flat, feat.shape[:3] + obj_embed_flat.shape[1:])
    else:
        obj_embed = feat
    if self.spatial_mean:
        obj_embed = torch.mean(obj_embed, dim=[-1, -2], keepdim=True)
    return obj_embed
[ "def", "forward", "(", "self", ",", "feat", ")", ":", "if", "self", ".", "encoder", ":", "feat_flat", "=", "torch", ".", "flatten", "(", "feat", ",", "0", ",", "2", ")", "obj_embed_flat", "=", "self", ".", "encoder", "(", "feat_flat", ")", "obj_embed", "=", "torch", ".", "reshape", "(", "obj_embed_flat", ",", "feat", ".", "shape", "[", ":", "3", "]", "+", "obj_embed_flat", ".", "shape", "[", "1", ":", "]", ")", "else", ":", "obj_embed", "=", "feat", "if", "self", ".", "spatial_mean", ":", "obj_embed", "=", "torch", ".", "mean", "(", "obj_embed", ",", "dim", "=", "[", "-", "1", ",", "-", "2", "]", ",", "keepdims", "=", "True", ")", "return", "obj_embed" ]
[ 854, 4 ]
[ 868, 24 ]
python
en
['en', 'error', 'th']
False
ContextGatingObjectifier.forward
(self, vid_feat)
Decompose the video features into object level representation. Args: vid_feat: (BxTxDxH'xW') nobj (int): Max number of objects in the scene. The hope is that the extra channels will just have some degenerate information Returns: BxTxNobjxDxH''xW''
Decompose the video features into object level representation. Args: vid_feat: (BxTxDxH'xW') nobj (int): Max number of objects in the scene. The hope is that the extra channels will just have some degenerate information Returns: BxTxNobjxDxH''xW''
def forward(self, vid_feat): """ Decompose the video features into object level representation. Args: vid_feat: (BxTxDxH'xW') nobj (int): Max number of objects in the scene. The hope is that the extra channels will just have some degenerate information Returns: BxTxNobjxDxH''xW'' """ raise NotImplementedError('The inp is now objfied, TODO deal with it') batch_size = vid_feat.shape[0] # Use context gating: generate a heatmap for each object at each time # step, and weight using that heatmap to get an object representation flatten_feat = torch.flatten(vid_feat, 0, 1) # Unsqueeze to add a channel dimension to the attention maps obj_map = self.obj_mapper(flatten_feat).unsqueeze(2) # Add a 1-D object dimension flatten_feat = flatten_feat.unsqueeze(1) # Weight the feats with the attention maps to get the object-features mapped_feat = flatten_feat * obj_map # Reshape to add the time dimension back mapped_feat = torch.reshape(mapped_feat, (batch_size, -1) + mapped_feat.shape[1:]) final_feat = self.obj_encoder(mapped_feat) return final_feat
[ "def", "forward", "(", "self", ",", "vid_feat", ")", ":", "raise", "NotImplementedError", "(", "'The inp is now objfied, TODO deal with it'", ")", "batch_size", "=", "vid_feat", ".", "shape", "[", "0", "]", "# Use context gating: generate a heatmap for each object at each time", "# step, and weight using that heatmap to get an object representation", "flatten_feat", "=", "torch", ".", "flatten", "(", "vid_feat", ",", "0", ",", "1", ")", "# Unsqueeze to add a channel dimension to the attention maps", "obj_map", "=", "self", ".", "obj_mapper", "(", "flatten_feat", ")", ".", "unsqueeze", "(", "2", ")", "# Add a 1-D object dimension", "flatten_feat", "=", "flatten_feat", ".", "unsqueeze", "(", "1", ")", "# Weight the feats with the attention maps to get the object-features", "mapped_feat", "=", "flatten_feat", "*", "obj_map", "# Reshape to add the time dimension back", "mapped_feat", "=", "torch", ".", "reshape", "(", "mapped_feat", ",", "(", "batch_size", ",", "-", "1", ")", "+", "mapped_feat", ".", "shape", "[", "1", ":", "]", ")", "final_feat", "=", "self", ".", "obj_encoder", "(", "mapped_feat", ")", "return", "final_feat" ]
[ 887, 4 ]
[ 912, 25 ]
python
en
['en', 'error', 'th']
False
ChannelSplitObjectifier.forward
(self, vid_feat)
Decompose the video features into object level representation. Args: vid_feat: (BxTxNobjxDxH'xW') Returns: BxTxNobjx(D/Nobj)xH'xW'
Decompose the video features into object level representation. Args: vid_feat: (BxTxNobjxDxH'xW') Returns: BxTxNobjx(D/Nobj)xH'xW'
def forward(self, vid_feat):
    """
    Decompose the video features into object level representation.
    Args:
        vid_feat: (BxTxNobjxDxH'xW')
    Returns:
        BxTxNobjx(D/Nobj)xH'xW'
    """
    assert vid_feat.shape[2] == 1, (
        'Channel split cannot deal with pre-objectified input '
        '({} objects)'.format(vid_feat.shape[2]))
    assert vid_feat.shape[3] % self.nobj == 0, 'Must be divisible'
    # Reshape the channel dimension to split into an object dimension
    objed = vid_feat.view(vid_feat.shape[:2] + (self.nobj, -1) +
                          vid_feat.shape[-2:])
    assert objed.shape[2] == self.nobj
    assert objed.shape[3] == vid_feat.shape[3] // self.nobj
    # Apply a little network to get a flat feature
    obj_encoded = self.obj_encoder(objed)
    return obj_encoded
[ "def", "forward", "(", "self", ",", "vid_feat", ")", ":", "assert", "vid_feat", ".", "shape", "[", "2", "]", "==", "1", ",", "(", "'Channel split can not deal with pre objectified {} input'", ".", "format", "(", "vid_feat", ".", "shape", "[", "2", "]", ")", ")", "assert", "vid_feat", ".", "shape", "[", "3", "]", "%", "self", ".", "nobj", "==", "0", ",", "'Must be divisible'", "# Reshape the channel dimension to split into an object dimension", "objed", "=", "vid_feat", ".", "view", "(", "vid_feat", ".", "shape", "[", ":", "2", "]", "+", "(", "self", ".", "nobj", ",", "-", "1", ")", "+", "vid_feat", ".", "shape", "[", "-", "2", ":", "]", ")", "assert", "objed", ".", "shape", "[", "2", "]", "==", "self", ".", "nobj", "assert", "objed", ".", "shape", "[", "3", "]", "==", "vid_feat", ".", "shape", "[", "3", "]", "/", "self", ".", "nobj", "# Apply a little network to get a flat feature", "obj_encoded", "=", "self", ".", "obj_encoder", "(", "objed", ")", "return", "obj_encoded" ]
[ 923, 4 ]
[ 942, 26 ]
python
en
['en', 'error', 'th']
False
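The reshape at the heart of the channel split, on made-up shapes: a single-object, D-channel feature map becomes nobj objects with D/nobj channels each.

import torch

nobj = 3
vid_feat = torch.rand(2, 5, 1, 12, 7, 7)  # B, T, 1, D, H', W' with D=12
objed = vid_feat.view(vid_feat.shape[:2] + (nobj, -1) +
                      vid_feat.shape[-2:])
print(objed.shape)  # torch.Size([2, 5, 3, 4, 7, 7])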
SimpleBaseEncoder.__init__
(self, in_dim, width_scale_factor)
Simple encoder weights. For a 256x256 input, it'll give an 8x8 output.
Simple encoder weights. For a 256x256 input, it'll give an 8x8 output.
def __init__(self, in_dim, width_scale_factor):
    """Simple encoder weights.
    For a 256x256 input, it'll give an 8x8 output."""
    super().__init__()
    self.width_scale_factor = width_scale_factor
    _s = self._scale_int
    self.stem = nn.Sequential(
        nn.Conv2d(in_dim, 3, kernel_size=1, bias=False),
        nn.BatchNorm2d(3),
        nn.ReLU(inplace=True),
        nn.Conv2d(3, _s(64), kernel_size=7, stride=2, padding=3,
                  bias=False),
        nn.BatchNorm2d(_s(64)),
        nn.ReLU(inplace=True),
        nn.Conv2d(_s(64), _s(64), kernel_size=5, stride=2, padding=2,
                  bias=False),
        nn.BatchNorm2d(_s(64)),
        nn.ReLU(inplace=True),
        nn.Conv2d(_s(64), _s(64), kernel_size=5, stride=2, padding=2,
                  bias=False),
        nn.BatchNorm2d(_s(64)),
        nn.ReLU(inplace=True),
        nn.Conv2d(_s(64), _s(64), kernel_size=5, stride=2, padding=2,
                  bias=False),
        nn.BatchNorm2d(_s(64)),
        nn.ReLU(inplace=True),
        nn.Conv2d(_s(64), _s(128), kernel_size=5, stride=2, padding=2,
                  bias=False),
        nn.BatchNorm2d(_s(128)),
        nn.ReLU(inplace=True),
    )
    self.out_dim = _s(128)
[ "def", "__init__", "(", "self", ",", "in_dim", ",", "width_scale_factor", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "width_scale_factor", "=", "width_scale_factor", "_s", "=", "self", ".", "_scale_int", "self", ".", "stem", "=", "nn", ".", "Sequential", "(", "nn", ".", "Conv2d", "(", "in_dim", ",", "3", ",", "kernel_size", "=", "1", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "3", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Conv2d", "(", "3", ",", "_s", "(", "64", ")", ",", "kernel_size", "=", "7", ",", "stride", "=", "2", ",", "padding", "=", "3", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "_s", "(", "64", ")", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Conv2d", "(", "_s", "(", "64", ")", ",", "_s", "(", "64", ")", ",", "kernel_size", "=", "5", ",", "stride", "=", "2", ",", "padding", "=", "2", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "_s", "(", "64", ")", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Conv2d", "(", "_s", "(", "64", ")", ",", "_s", "(", "64", ")", ",", "kernel_size", "=", "5", ",", "stride", "=", "2", ",", "padding", "=", "2", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "_s", "(", "64", ")", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Conv2d", "(", "_s", "(", "64", ")", ",", "_s", "(", "64", ")", ",", "kernel_size", "=", "5", ",", "stride", "=", "2", ",", "padding", "=", "2", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "_s", "(", "64", ")", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Conv2d", "(", "_s", "(", "64", ")", ",", "_s", "(", "128", ")", ",", "kernel_size", "=", "5", ",", "stride", "=", "2", ",", "padding", "=", "2", ",", "bias", "=", "False", ")", ",", "nn", ".", "BatchNorm2d", "(", "_s", "(", "128", ")", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", ")", "self", ".", "out_dim", "=", "_s", "(", "128", ")" ]
[ 965, 4 ]
[ 1016, 30 ]
python
af
['es', 'af', 'en']
False
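A quick arithmetic check of the stem's downsampling, using the standard convolution output-size formula (this check is not part of the original source): the stem has five stride-2 convolutions, each of which halves the spatial size, so a 256x256 input comes out at 8x8.

# out = (in + 2 * pad - kernel) // stride + 1
size = 256
size = (size + 2 * 3 - 7) // 2 + 1  # the 7x7, stride-2 conv -> 128
for _ in range(4):                  # the four 5x5, stride-2 convs
    size = (size + 2 * 2 - 5) // 2 + 1
print(size)  # 8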
SimpleBaseEncoder._scale_int
(self, n)
Scale the number by a factor. To control width of this network.
Scale the number by a factor. To control width of this network.
def _scale_int(self, n): """Scale the number by a factor. To control width of this network.""" return int(self.width_scale_factor * n)
[ "def", "_scale_int", "(", "self", ",", "n", ")", ":", "return", "int", "(", "self", ".", "width_scale_factor", "*", "n", ")" ]
[ 1018, 4 ]
[ 1020, 47 ]
python
en
['en', 'en', 'en']
True
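For reference, a minimal standalone sketch of the width scaling this record implements (the factor values below are illustrative assumptions, not from the source):

def scale_int(width_scale_factor, n):
    # Mirrors SimpleBaseEncoder._scale_int: int() truncates, so a 0.75x
    # factor maps 64 channels to 48.
    return int(width_scale_factor * n)

assert scale_int(0.5, 64) == 32    # half-width network
assert scale_int(0.75, 64) == 48
assert scale_int(1.0, 128) == 128  # unit factor leaves widths unchanged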
BasicEncoder.__init__
(self, in_dim, nobj, feat_ext, objectifier, obj_encoder, spatial_mean, feat_ext_eval_mode, process_objs_together)
Args:
    obj_before_enc: If true, do the objectify in the input (pixel) space
        before running the encode (so each object is encoded separately)
    spatial_mean: Avg pool the features to 1x1
    feat_ext_eval_mode: Set the feature extractor to eval mode for BN,
        dropout etc
    process_objs_together: If true, it will concatenate all objs on the
        channel dimension, extract features, and split the features in
        channel dimensions to get features for each obj
Args: obj_before_enc: If true, do the objectify in the input (pixel) space before running the encode (so each object is encoded separately) spatial_mean: Avg pool the features to 1x1 feat_ext_eval_mode: Set the feature extractor to eval mode for BN, dropout etc process_objs_together: If true, it will concatenate all objs on the channel dimension, extract features, and split the features in channel dimensions to get features for each obj
def __init__(self, in_dim, nobj, feat_ext, objectifier, obj_encoder,
             spatial_mean, feat_ext_eval_mode, process_objs_together):
    """
    Args:
        obj_before_enc: If true, do the objectify in the input (pixel) space
            before running the encode (so each object is encoded separately)
        spatial_mean: Avg pool the features to 1x1
        feat_ext_eval_mode: Set the feature extractor to eval mode for BN,
            dropout etc
        process_objs_together: If true, it will concatenate all objs on the
            channel dimension, extract features, and split the features in
            channel dimensions to get features for each obj
    """
    super().__init__()
    self.nobj = nobj
    self.process_objs_together = process_objs_together
    # The image embedding model
    self.feat_ext = hydra.utils.instantiate(
        feat_ext, in_dim * nobj if self.process_objs_together else in_dim)
    initial_dim = self.feat_ext.out_dim
    # The objects model
    self.objectifier = hydra.utils.instantiate(objectifier, initial_dim,
                                               obj_encoder)
    self.out_dim = self.objectifier.out_dim
    if self.process_objs_together:
        assert self.out_dim % nobj == 0
        self.out_dim //= nobj
    self.spatial_mean = spatial_mean
    self.feat_ext_eval_mode = feat_ext_eval_mode
[ "def", "__init__", "(", "self", ",", "in_dim", ",", "nobj", ",", "feat_ext", ",", "objectifier", ",", "obj_encoder", ",", "spatial_mean", ",", "feat_ext_eval_mode", ",", "process_objs_together", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "nobj", "=", "nobj", "self", ".", "process_objs_together", "=", "process_objs_together", "# The image embedding model", "self", ".", "feat_ext", "=", "hydra", ".", "utils", ".", "instantiate", "(", "feat_ext", ",", "in_dim", "*", "nobj", "if", "self", ".", "process_objs_together", "else", "in_dim", ")", "initial_dim", "=", "self", ".", "feat_ext", ".", "out_dim", "# The objects model", "self", ".", "objectifier", "=", "hydra", ".", "utils", ".", "instantiate", "(", "objectifier", ",", "initial_dim", ",", "obj_encoder", ")", "self", ".", "out_dim", "=", "self", ".", "objectifier", ".", "out_dim", "if", "self", ".", "process_objs_together", ":", "assert", "self", ".", "out_dim", "%", "nobj", "==", "0", "self", ".", "out_dim", "//=", "nobj", "self", ".", "spatial_mean", "=", "spatial_mean", "self", ".", "feat_ext_eval_mode", "=", "feat_ext_eval_mode" ]
[ 1057, 4 ]
[ 1085, 52 ]
python
en
['en', 'error', 'th']
False
BasicEncoder._forward_vid
(self, batch_vid_obs, l2_norm_feats=False)
Convert a video into images to run the forward model.

Args:
    batch_vid_obs: BxTxCxHxW or BxTxNobjxCxHxW
Returns:
    features: BxTxDxH'xW' or BxTxNobjxDxH'xW'
Convert a video into images to run the forward model.
def _forward_vid(self, batch_vid_obs, l2_norm_feats=False):
    """
    Convert a video into images to run the forward model.
    Args:
        batch_vid_obs: BxTxCxHxW or BxTxNobjxCxHxW
    Returns:
        features: BxTxDxH'xW' or BxTxNobjxDxH'xW'
    """
    # Add an object dimension, so the rest of the code doesn't have to
    # deal with edge cases
    added_obj_dim = False
    if len(batch_vid_obs.shape) == 4:
        added_obj_dim = True
        batch_vid_obs = batch_vid_obs.unsqueeze(2)  # BxTxNobjxCxHxW
    # Flatten videos into frames to extract out the features
    # resulting shape B'xC'xHxW
    if self.process_objs_together:
        # resulting shape B' = B * T, C' = Nobj * C
        flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-4:])
        flat_obs = torch.flatten(flat_obs, 1, 2)
    else:
        # resulting shape B' = B * T * Nobj, C' = C
        flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-3:])
    # Extract features
    if self.feat_ext_eval_mode:
        self.feat_ext.eval()
    features = self.feat_ext(flat_obs)
    if self.spatial_mean:
        # Mean over spatial dimensions
        features = torch.mean(features, dim=[-2, -1], keepdims=True)
    if l2_norm_feats:
        # L2 normalize the features -- MemoryBank, MoCo and PIRL do that
        features = nn.functional.normalize(features, p=2, dim=-1)
    # Reshape back to original batch dimension
    if self.process_objs_together:
        features_batched = features.reshape(batch_vid_obs.shape[:2] +
                                            (self.nobj, -1) +
                                            features.shape[-2:])
    else:
        features_batched = features.reshape(batch_vid_obs.shape[:-3] +
                                            features.shape[1:])
    if added_obj_dim:
        features_batched = features_batched.squeeze(2)
    assert features_batched.shape[-3] == self.out_dim
    return features_batched
[ "def", "_forward_vid", "(", "self", ",", "batch_vid_obs", ",", "l2_norm_feats", "=", "False", ")", ":", "# Add an object dimension, so the rest of the code doesn't have to", "# deal with edge cases", "added_obj_dim", "=", "False", "if", "len", "(", "batch_vid_obs", ".", "shape", ")", "==", "4", ":", "added_obj_dim", "=", "True", "batch_vid_obs", "=", "batch_vid_obs", ".", "unsqueeze", "(", "2", ")", "# BxTxNobjxCxHxW", "# Flatten videos into frames to extract out the features", "# resulting shape B'xC'xHxW", "if", "self", ".", "process_objs_together", ":", "# resulting shape B' = B * T, C' = Nobj * C", "flat_obs", "=", "batch_vid_obs", ".", "reshape", "(", "(", "-", "1", ",", ")", "+", "batch_vid_obs", ".", "shape", "[", "-", "4", ":", "]", ")", "flat_obs", "=", "torch", ".", "flatten", "(", "flat_obs", ",", "1", ",", "2", ")", "else", ":", "# resulting shape B' = B * T * Nobj, C' = C", "flat_obs", "=", "batch_vid_obs", ".", "reshape", "(", "(", "-", "1", ",", ")", "+", "batch_vid_obs", ".", "shape", "[", "-", "3", ":", "]", ")", "# Extract features", "if", "self", ".", "feat_ext_eval_mode", ":", "self", ".", "feat_ext", ".", "eval", "(", ")", "features", "=", "self", ".", "feat_ext", "(", "flat_obs", ")", "if", "self", ".", "spatial_mean", ":", "# Mean over spatial dimensions", "features", "=", "torch", ".", "mean", "(", "features", ",", "dim", "=", "[", "-", "2", ",", "-", "1", "]", ",", "keepdims", "=", "True", ")", "if", "l2_norm_feats", ":", "# L2 normalize the features -- MemoryBank, MoCo and PIRL do that", "features", "=", "nn", ".", "functional", ".", "normalize", "(", "features", ",", "p", "=", "2", ",", "dim", "=", "-", "1", ")", "# Reshape back to original batch dimension", "if", "self", ".", "process_objs_together", ":", "features_batched", "=", "features", ".", "reshape", "(", "batch_vid_obs", ".", "shape", "[", ":", "2", "]", "+", "(", "self", ".", "nobj", ",", "-", "1", ")", "+", "features", ".", "shape", "[", "-", "2", ":", "]", ")", "else", ":", "features_batched", "=", "features", ".", "reshape", "(", "batch_vid_obs", ".", "shape", "[", ":", "-", "3", "]", "+", "features", ".", "shape", "[", "1", ":", "]", ")", "if", "added_obj_dim", ":", "features_batched", "=", "features_batched", ".", "squeeze", "(", "2", ")", "assert", "features_batched", ".", "shape", "[", "-", "3", "]", "==", "self", ".", "out_dim", "return", "features_batched" ]
[ 1087, 4 ]
[ 1131, 31 ]
python
en
['en', 'error', 'th']
False
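A minimal sketch of the flatten/unflatten round-trip in _forward_vid above, for the per-object path (process_objs_together=False); all sizes are illustrative assumptions:

import torch

B, T, Nobj, C, H, W = 2, 4, 3, 7, 64, 64
batch_vid_obs = torch.randn(B, T, Nobj, C, H, W)
# Flatten leading dims so the feature extractor sees plain image batches
flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-3:])
assert flat_obs.shape == (B * T * Nobj, C, H, W)
# Stand-in for self.feat_ext(flat_obs); a real extractor changes C, H, W
features = flat_obs
# Restore the (B, T, Nobj) prefix exactly as the code above does
features_batched = features.reshape(batch_vid_obs.shape[:-3] + features.shape[1:])
assert features_batched.shape == batch_vid_obs.shape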
BasicEncoder.forward
(self, vid)
Args:
    vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e.
        one-hot
Returns:
    obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
Args: vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e. one-hot Returns: obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
def forward(self, vid):
    """
    Args:
        vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e.
            one-hot
    Returns:
        obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
    """
    vid_feat = self._forward_vid(vid)
    vid_feat = self.objectifier(vid_feat)
    return vid_feat
[ "def", "forward", "(", "self", ",", "vid", ")", ":", "vid_feat", "=", "self", ".", "_forward_vid", "(", "vid", ")", "vid_feat", "=", "self", ".", "objectifier", "(", "vid_feat", ")", "return", "vid_feat" ]
[ 1133, 4 ]
[ 1143, 23 ]
python
en
['en', 'error', 'th']
False
MLPClassifier.forward
(self, preds, pixs, process_all_frames=False)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
    process_all_frames: Set true when used by other classifiers for
        intermediate feature extraction, so to get features for each frame.
Returns:
    solved: (BxT)
Run the classifier on the predictions.
def forward(self, preds, pixs, process_all_frames=False):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
        process_all_frames: Set true when used by other classifiers for
            intermediate feature extraction, so to get features for each
            frame.
    Returns:
        solved: (BxT)
    """
    del pixs  # This does not use it
    if self.nlayers == 0:
        return preds
    # Since this classifier doesn't take into account context and the final
    # _cls is going to look at the last frame, so might as well only process
    # that last frame
    if not process_all_frames:
        preds = preds[:, -1:, ...]
    mean_feat = torch.mean(preds, axis=[2, -1, -2])
    if self.match_inp_sz_layer:
        if self.init_linear_wt is None:
            logging.warning(
                'Creating a linear layer to map the input '
                'dims (%d) to MLP input dim (%d)', mean_feat.shape[-1],
                self.in_dim)
            self.reset_parameters(preds, self.in_dim,
                                  preds.shape[1] * preds.shape[3])
        mean_feat = nn.functional.linear(mean_feat, self.init_linear_wt)
        mean_feat = nn.ReLU(inplace=True)(mean_feat)
    return self.cls(mean_feat).squeeze(-1)
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ",", "process_all_frames", "=", "False", ")", ":", "del", "pixs", "# This does not use it", "if", "self", ".", "nlayers", "==", "0", ":", "return", "preds", "# Since this classifier doesn't take into account context and the final", "# _cls is going to look at the last frame, so might as well only process", "# that last frame", "if", "not", "process_all_frames", ":", "preds", "=", "preds", "[", ":", ",", "-", "1", ":", ",", "...", "]", "mean_feat", "=", "torch", ".", "mean", "(", "preds", ",", "axis", "=", "[", "2", ",", "-", "1", ",", "-", "2", "]", ")", "if", "self", ".", "match_inp_sz_layer", ":", "if", "self", ".", "init_linear_wt", "is", "None", ":", "logging", ".", "warning", "(", "'Creating a linear layer to map the input '", "'dims (%d) to MLP input dim (%d)'", ",", "mean_feat", ".", "shape", "[", "-", "1", "]", ",", "self", ".", "in_dim", ")", "self", ".", "reset_parameters", "(", "preds", ",", "self", ".", "in_dim", ",", "preds", ".", "shape", "[", "1", "]", "*", "preds", ".", "shape", "[", "3", "]", ")", "mean_feat", "=", "nn", ".", "functional", ".", "linear", "(", "mean_feat", ",", "self", ".", "init_linear_wt", ")", "mean_feat", "=", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", "(", "mean_feat", ")", "return", "self", ".", "cls", "(", "mean_feat", ")", ".", "squeeze", "(", "-", "1", ")" ]
[ 1181, 4 ]
[ 1212, 46 ]
python
en
['en', 'error', 'th']
False
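A shape sketch of the pooling in MLPClassifier.forward above (sizes are illustrative): averaging over the object axis and both spatial axes leaves one feature vector per frame.

import torch

B, T, D, H, W = 2, 5, 128, 4, 4
preds = torch.randn(B, T, 1, D, H, W)
mean_feat = torch.mean(preds, dim=[2, -1, -2])  # pool objects + space
assert mean_feat.shape == (B, T, D)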
ConvNetClassifier.forward
(self, preds, pixs, process_all_frames=False)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
    process_all_frames: Set true when used by other classifiers for
        intermediate feature extraction, so to get features for each frame.
Returns:
    solved: (BxT)
Run the classifier on the predictions.
def forward(self, preds, pixs, process_all_frames=False):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
        process_all_frames: Set true when used by other classifiers for
            intermediate feature extraction, so to get features for each
            frame.
    Returns:
        solved: (BxT)
    """
    # Not enforcing the assert here if pred is None, since this module
    # is usually used by other modules as a way to extract features,
    # and it might pass in None for preds. But rest assured, this check
    # would have been done on the caller side.
    assert preds is None or preds.shape[1] == pixs.shape[1], (
        'Must pass in run_decode=True if using a pixel-based classifier!!')
    del preds  # This does not use it
    # Since this classifier doesn't take into account context and the final
    # _cls is going to look at the last frame, so might as well only process
    # that last frame
    if not process_all_frames:
        pixs = pixs[:, -1:, ...]
    obj_feats = self.enc(pixs)
    return self.cls(obj_feats, None, process_all_frames=process_all_frames)
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ",", "process_all_frames", "=", "False", ")", ":", "# Not enforcing the assert here if pred is None, since this module", "# is usually used by other modules as a way to extract features,", "# and it might pass in None for preds. But rest assured, this check", "# would have been done on the caller side.", "assert", "preds", "is", "None", "or", "preds", ".", "shape", "[", "1", "]", "==", "pixs", ".", "shape", "[", "1", "]", ",", "(", "'Must pass in run_decode=True if using a pixel-based classifier!!'", ")", "del", "preds", "# This does not use it", "# Since this classifier doesn't take into account context and the final", "# _cls is going to look at the last frame, so might as well only process", "# that last frame", "if", "not", "process_all_frames", ":", "pixs", "=", "pixs", "[", ":", ",", "-", "1", ":", ",", "...", "]", "obj_feats", "=", "self", ".", "enc", "(", "pixs", ")", "return", "self", ".", "cls", "(", "obj_feats", ",", "None", ",", "process_all_frames", "=", "process_all_frames", ")" ]
[ 1256, 4 ]
[ 1281, 79 ]
python
en
['en', 'error', 'th']
False
TxClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    del pixs  # This does not use it
    # Spatial mean the features
    stacked_mean_feat = torch.flatten(torch.mean(preds, axis=[-1, -2]), 1,
                                      2)
    feat_enc_time = self.cls(self.tx_enc(stacked_mean_feat))
    # Max pool over time to get the final prediction
    # Keepdims since the output format expects a time dimension and does
    # a max pool over it at the end
    cls_pred = torch.max(feat_enc_time, dim=1,
                         keepdims=True)[0].squeeze(-1)
    return cls_pred
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "del", "pixs", "# This does not use it", "# Spatial mean the features", "stacked_mean_feat", "=", "torch", ".", "flatten", "(", "torch", ".", "mean", "(", "preds", ",", "axis", "=", "[", "-", "1", ",", "-", "2", "]", ")", ",", "1", ",", "2", ")", "feat_enc_time", "=", "self", ".", "cls", "(", "self", ".", "tx_enc", "(", "stacked_mean_feat", ")", ")", "# Max pool over time to get the final prediction", "# Keepdims since the output format expects a time dimension and does", "# a max pool over it at the end", "cls_pred", "=", "torch", ".", "max", "(", "feat_enc_time", ",", "dim", "=", "1", ",", "keepdims", "=", "True", ")", "[", "0", "]", ".", "squeeze", "(", "-", "1", ")", "return", "cls_pred" ]
[ 1291, 4 ]
[ 1310, 23 ]
python
en
['en', 'error', 'th']
False
ConvTxClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    assert preds.shape[1] == pixs.shape[1], (
        'Must pass in run_decode=True if using a pixel-based classifier!!')
    del preds
    feats = self.conv_feat(None, pixs, process_all_frames=True)
    preds = self.tx_cls(feats, None)
    return preds
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "assert", "preds", ".", "shape", "[", "1", "]", "==", "pixs", ".", "shape", "[", "1", "]", ",", "(", "'Must pass in run_decode=True if using a pixel-based classifier!!'", ")", "del", "preds", "feats", "=", "self", ".", "conv_feat", "(", "None", ",", "pixs", ",", "process_all_frames", "=", "True", ")", "preds", "=", "self", ".", "tx_cls", "(", "feats", ",", "None", ")", "return", "preds" ]
[ 1320, 4 ]
[ 1334, 20 ]
python
en
['en', 'error', 'th']
False
Conv3dClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    del pixs
    enc_preds = self.enc(preds.squeeze(2).transpose(1, 2))
    cls_preds = self.cls(torch.mean(enc_preds, [-1, -2, -3]))
    # It has 1 extra dim in the end from the fc layer which should be
    # removed, but since I need to add a time dimension anyway, just leave
    # this there (will end up the same)
    return cls_preds
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "del", "pixs", "enc_preds", "=", "self", ".", "enc", "(", "preds", ".", "squeeze", "(", "2", ")", ".", "transpose", "(", "1", ",", "2", ")", ")", "cls_preds", "=", "self", ".", "cls", "(", "torch", ".", "mean", "(", "enc_preds", ",", "[", "-", "1", ",", "-", "2", ",", "-", "3", "]", ")", ")", "# It has 1 extra dim in the end from the fc layer which should be", "# removed, but since I need to add a time dimension anyway, just leave", "# this there (will end up the same)", "return", "cls_preds" ]
[ 1349, 4 ]
[ 1364, 24 ]
python
en
['en', 'error', 'th']
False
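A shape sketch of the axis juggling in Conv3dClassifier.forward above (sizes illustrative): 3D convolutions expect (B, C, T, H, W), hence the squeeze of the singleton object axis followed by the swap of time and channels.

import torch

B, T, D, H, W = 2, 8, 64, 4, 4
preds = torch.randn(B, T, 1, D, H, W)
x = preds.squeeze(2).transpose(1, 2)
assert x.shape == (B, D, T, H, W)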
ConvConv3dClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    assert preds.shape[1] == pixs.shape[1], (
        'Must pass in run_decode=True if using a pixel-based classifier!!')
    del preds
    feats = self.conv_feat(None, pixs, process_all_frames=True)
    preds = self.td_cls(feats, None)
    return preds
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "assert", "preds", ".", "shape", "[", "1", "]", "==", "pixs", ".", "shape", "[", "1", "]", ",", "(", "'Must pass in run_decode=True if using a pixel-based classifier!!'", ")", "del", "preds", "feats", "=", "self", ".", "conv_feat", "(", "None", ",", "pixs", ",", "process_all_frames", "=", "True", ")", "preds", "=", "self", ".", "td_cls", "(", "feats", ",", "None", ")", "return", "preds" ]
[ 1374, 4 ]
[ 1388, 20 ]
python
en
['en', 'error', 'th']
False
ConcatClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    del pixs
    # Concatenate over the time dimension
    preds_flat = preds.view(preds.shape[0], 1, 1, -1, preds.shape[-2],
                            preds.shape[-1])
    return self.cls(preds_flat, None, process_all_frames=True)
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "del", "pixs", "# Concatenate over the time dimension", "preds_flat", "=", "preds", ".", "view", "(", "preds", ".", "shape", "[", "0", "]", ",", "1", ",", "1", ",", "-", "1", ",", "preds", ".", "shape", "[", "-", "2", "]", ",", "preds", ".", "shape", "[", "-", "1", "]", ")", "return", "self", ".", "cls", "(", "preds_flat", ",", "None", ",", "process_all_frames", "=", "True", ")" ]
[ 1397, 4 ]
[ 1410, 66 ]
python
en
['en', 'error', 'th']
False
ConvConcatClassifier.forward
(self, preds, pixs)
Run the classifier on the predictions.

Args:
    preds: (BxTx1xDxH'xW')
    pixs: (BxTx1xDxHxW)
Returns:
    solved: (Bx1)
Run the classifier on the predictions.
def forward(self, preds, pixs):
    """
    Run the classifier on the predictions.
    Args:
        preds: (BxTx1xDxH'xW')
        pixs: (BxTx1xDxHxW)
    Returns:
        solved: (Bx1)
    """
    assert preds.shape[1] == pixs.shape[1], (
        'Must pass in run_decode=True if using a pixel-based classifier!!')
    del preds
    feats = self.conv_feat(None, pixs, process_all_frames=True)
    preds = self.concat_cls(feats, None)
    return preds
[ "def", "forward", "(", "self", ",", "preds", ",", "pixs", ")", ":", "assert", "preds", ".", "shape", "[", "1", "]", "==", "pixs", ".", "shape", "[", "1", "]", ",", "(", "'Must pass in run_decode=True if using a pixel-based classifier!!'", ")", "del", "preds", "feats", "=", "self", ".", "conv_feat", "(", "None", ",", "pixs", ",", "process_all_frames", "=", "True", ")", "preds", "=", "self", ".", "concat_cls", "(", "feats", ",", "None", ")", "return", "preds" ]
[ 1421, 4 ]
[ 1435, 20 ]
python
en
['en', 'error', 'th']
False
TrivialInteractor.forward
(cls, feat)
Args:
    feat: (B, T, Nobj, C, H', W')
Returns:
    feat as is
Args: feat: (B, T, Nobj, C, H', W') Returns: feat as is
def forward(cls, feat):
    """
    Args:
        feat: (B, T, Nobj, C, H', W')
    Returns:
        feat as is
    """
    return feat
[ "def", "forward", "(", "cls", ",", "feat", ")", ":", "return", "feat" ]
[ 1445, 4 ]
[ 1452, 19 ]
python
en
['en', 'error', 'th']
False
TxEncoder.__init__
(self, in_dim, nheads, nlayers, maintain_dim=False)
Args:
    maintain_dim (bool): If true, it maps the final output to the same
        dimensionality as the input
Args: maintain_dim (bool): If true, it maps the final output to the same dimensionality as the input
def __init__(self, in_dim, nheads, nlayers, maintain_dim=False):
    """
    Args:
        maintain_dim (bool): If true, it maps the final output to the same
            dimensionality as the input
    """
    super().__init__()
    # Very basic position encoding
    self.loc_embed = nn.Sequential(nn.Linear(1, 4), nn.ReLU(inplace=True),
                                   nn.Linear(4, 8))
    self.nheads = nheads
    self.nlayers = nlayers
    in_dim_loc = in_dim + 8 * nheads
    self.loc_mixer = nn.Linear(in_dim_loc, in_dim_loc)
    layer = nn.TransformerEncoderLayer(in_dim_loc, nheads)
    self.encoder = nn.TransformerEncoder(layer, nlayers)
    if maintain_dim:
        self.back_to_orig_dim = nn.Linear(in_dim_loc, in_dim)
        self.out_dim = in_dim
    else:
        self.back_to_orig_dim = lambda x: x  # Identity
        self.out_dim = in_dim_loc
[ "def", "__init__", "(", "self", ",", "in_dim", ",", "nheads", ",", "nlayers", ",", "maintain_dim", "=", "False", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "# Very basic position encoding", "self", ".", "loc_embed", "=", "nn", ".", "Sequential", "(", "nn", ".", "Linear", "(", "1", ",", "4", ")", ",", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", ",", "nn", ".", "Linear", "(", "4", ",", "8", ")", ")", "self", ".", "nheads", "=", "nheads", "self", ".", "nlayers", "=", "nlayers", "in_dim_loc", "=", "in_dim", "+", "8", "*", "nheads", "self", ".", "loc_mixer", "=", "nn", ".", "Linear", "(", "in_dim_loc", ",", "in_dim_loc", ")", "layer", "=", "nn", ".", "TransformerEncoderLayer", "(", "in_dim_loc", ",", "nheads", ")", "self", ".", "encoder", "=", "nn", ".", "TransformerEncoder", "(", "layer", ",", "nlayers", ")", "if", "maintain_dim", ":", "self", ".", "back_to_orig_dim", "=", "nn", ".", "Linear", "(", "in_dim_loc", ",", "in_dim", ")", "self", ".", "out_dim", "=", "in_dim", "else", ":", "self", ".", "back_to_orig_dim", "=", "lambda", "x", ":", "x", "# Identity", "self", ".", "out_dim", "=", "in_dim_loc" ]
[ 1457, 4 ]
[ 1478, 37 ]
python
en
['en', 'error', 'th']
False
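Dimension bookkeeping for TxEncoder.__init__ above, with assumed numbers: the 8-dim location embedding is repeated once per head, so as long as in_dim is itself a multiple of nheads, the widened d_model stays divisible by nheads, which nn.TransformerEncoderLayer requires.

in_dim, nheads = 128, 4
in_dim_loc = in_dim + 8 * nheads
assert in_dim_loc == 160 and in_dim_loc % nheads == 0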
TxEncoder.forward
(self, feat)
Args:
    feat: (B, T, C)
Returns:
    Same shape as input
Args: feat: (B, T, C) Returns: Same shape as input
def forward(self, feat):
    """
    Args:
        feat: (B, T, C)
    Returns:
        Same shape as input
    """
    # Add a location embedding (over time), since time axis will flatten
    loc_embedding = self.loc_embed(
        torch.arange(feat.shape[1],
                     device=feat.device).unsqueeze(-1).float())
    # Make into the shape of the feature
    loc_embedding = loc_embedding.unsqueeze(0).repeat(
        feat.shape[0], 1, self.nheads)
    feat = torch.cat([feat, loc_embedding], dim=-1)
    # Mix up the location information throughout the features so each head
    # would have it
    mixed_feat = self.loc_mixer(feat)
    # Transformer encoder expects the time dimension as the 0th! So gotta
    # permute things around
    return self.back_to_orig_dim(
        self.encoder(mixed_feat.permute(1, 0, 2)).permute(1, 0, 2))
[ "def", "forward", "(", "self", ",", "feat", ")", ":", "# Add a location embedding (over time), since time axis will flatten", "loc_embedding", "=", "self", ".", "loc_embed", "(", "torch", ".", "arange", "(", "feat", ".", "shape", "[", "1", "]", ",", "device", "=", "feat", ".", "device", ")", ".", "unsqueeze", "(", "-", "1", ")", ".", "float", "(", ")", ")", "# Make into the shape of the feature", "loc_embedding", "=", "loc_embedding", ".", "unsqueeze", "(", "0", ")", ".", "repeat", "(", "feat", ".", "shape", "[", "0", "]", ",", "1", ",", "self", ".", "nheads", ")", "feat", "=", "torch", ".", "cat", "(", "[", "feat", ",", "loc_embedding", "]", ",", "dim", "=", "-", "1", ")", "# Mix up the location information throughout the features so each head", "# would have it", "mixed_feat", "=", "self", ".", "loc_mixer", "(", "feat", ")", "# Transformer encoder expects the time dimension as the 0th! So gotta", "# permute things around", "return", "self", ".", "back_to_orig_dim", "(", "self", ".", "encoder", "(", "mixed_feat", ".", "permute", "(", "1", ",", "0", ",", "2", ")", ")", ".", "permute", "(", "1", ",", "0", ",", "2", ")", ")" ]
[ 1480, 4 ]
[ 1501, 71 ]
python
en
['en', 'error', 'th']
False
TxInteractor.forward
(self, feat)
Args:
    feat: (B, T, Nobj, C, H', W')
Returns:
    Same shape as input
Args: feat: (B, T, Nobj, C, H', W') Returns: Same shape as input
def forward(self, feat):
    """
    Args:
        feat: (B, T, Nobj, C, H', W')
    Returns:
        Same shape as input
    """
    # Mean reduce the spatial dimensions for tx, then add it back to the
    # original feature as a residual connection
    feat_spat_mean = torch.mean(feat, dim=[-1, -2])
    feat_flat = feat_spat_mean.flatten(1, 2)
    tx_feat = self.tx_enc(feat_flat)
    tx_feat = tx_feat.view(
        feat_spat_mean.shape).unsqueeze(-1).unsqueeze(-1)
    return feat + tx_feat
[ "def", "forward", "(", "self", ",", "feat", ")", ":", "# Mean reduce the spatial dimensions for tx, then add it back to the", "# original feature as a residual connection", "feat_spat_mean", "=", "torch", ".", "mean", "(", "feat", ",", "dim", "=", "[", "-", "1", ",", "-", "2", "]", ")", "feat_flat", "=", "feat_spat_mean", ".", "flatten", "(", "1", ",", "2", ")", "tx_feat", "=", "self", ".", "tx_enc", "(", "feat_flat", ")", "tx_feat", "=", "tx_feat", ".", "view", "(", "feat_spat_mean", ".", "shape", ")", ".", "unsqueeze", "(", "-", "1", ")", ".", "unsqueeze", "(", "-", "1", ")", "return", "feat", "+", "tx_feat" ]
[ 1511, 4 ]
[ 1525, 29 ]
python
en
['en', 'error', 'th']
False
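A minimal sketch of the residual broadcast in TxInteractor.forward above: the transformer runs on spatially pooled features, and its output is added back over every spatial location through two trailing singleton dims (the identity stands in for tx_enc; sizes are assumptions):

import torch

B, T, Nobj, C, Hp, Wp = 2, 4, 1, 16, 8, 8
feat = torch.randn(B, T, Nobj, C, Hp, Wp)
feat_spat_mean = torch.mean(feat, dim=[-1, -2])  # (B, T, Nobj, C)
tx_feat = feat_spat_mean.flatten(1, 2)           # (B, T*Nobj, C), stand-in for tx_enc
tx_feat = tx_feat.view(feat_spat_mean.shape).unsqueeze(-1).unsqueeze(-1)
out = feat + tx_feat                             # broadcasts over H', W'
assert out.shape == feat.shape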
TxSpatialAttention.forward
(self, feat)
Args:
    feats (B, T, Nobj, D, H', W')
Args: feats (B, T, Nobj, D, H', W')
def forward(self, feat):
    """
    Args:
        feats (B, T, Nobj, D, H', W')
    """
    feat_flat = torch.flatten(torch.flatten(feat, 0, 2), -2, -1)
    feat_att = self.tx_enc(feat_flat.transpose(1, 2)).transpose(1, 2)
    return feat_att.view(feat.shape)
[ "def", "forward", "(", "self", ",", "feat", ")", ":", "feat_flat", "=", "torch", ".", "flatten", "(", "torch", ".", "flatten", "(", "feat", ",", "0", ",", "2", ")", ",", "-", "2", ",", "-", "1", ")", "feat_att", "=", "self", ".", "tx_enc", "(", "feat_flat", ".", "transpose", "(", "1", ",", "2", ")", ")", ".", "transpose", "(", "1", ",", "2", ")", "return", "feat_att", ".", "view", "(", "feat", ".", "shape", ")" ]
[ 1542, 4 ]
[ 1549, 40 ]
python
en
['en', 'error', 'th']
False
Fwd.__init__
(self, agent_cfg)
Args:
    dyn_type: The type of dynamics model to use.
    dyn_n: Number of previous features used for prediction.
Args: dyn_type: The type of dynamics model to use. dyn_n: Number of previous features used for prediction.
def __init__(self, agent_cfg):
    """
    Args:
        dyn_type: The type of dynamics model to use.
        dyn_n: Number of previous features used for prediction.
    """
    super().__init__()
    # The image embedding model
    self.preproc = VideoPreprocessor(agent_cfg)
    self.enc = hydra.utils.instantiate(agent_cfg.encoder,
                                       self.preproc.out_dim,
                                       agent_cfg.nobj)
    dim = self.enc.out_dim
    self.interactor = hydra.utils.instantiate(agent_cfg.interactor, dim)
    # The dynamics model
    self.dyn = hydra.utils.instantiate(agent_cfg.dyn, self.enc, dim)
    # Classifier model
    self.nframes_to_cls = agent_cfg.nframes_to_cls
    # A attention of the latent features before passing them through the
    # classifier.
    self.spat_att = hydra.utils.instantiate(agent_cfg.spat_att, dim)
    self.cls = hydra.utils.instantiate(agent_cfg.cls, dim)
    # Decoder model
    self.dec = hydra.utils.instantiate(agent_cfg.decoder, dim,
                                       phyre.NUM_COLORS)
    # Other loss functions
    self.pix_loss = hydra.utils.instantiate(agent_cfg.loss_fn.pix)
    self.nce_loss = hydra.utils.instantiate(agent_cfg.loss_fn.nce, dim)
[ "def", "__init__", "(", "self", ",", "agent_cfg", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "# The image embedding model", "self", ".", "preproc", "=", "VideoPreprocessor", "(", "agent_cfg", ")", "self", ".", "enc", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "encoder", ",", "self", ".", "preproc", ".", "out_dim", ",", "agent_cfg", ".", "nobj", ")", "dim", "=", "self", ".", "enc", ".", "out_dim", "self", ".", "interactor", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "interactor", ",", "dim", ")", "# The dynamics model", "self", ".", "dyn", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "dyn", ",", "self", ".", "enc", ",", "dim", ")", "# Classifier model", "self", ".", "nframes_to_cls", "=", "agent_cfg", ".", "nframes_to_cls", "# A attention of the latent features before passing them through the", "# classifier.", "self", ".", "spat_att", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "spat_att", ",", "dim", ")", "self", ".", "cls", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "cls", ",", "dim", ")", "# Decoder model", "self", ".", "dec", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "decoder", ",", "dim", ",", "phyre", ".", "NUM_COLORS", ")", "# Other loss functions", "self", ".", "pix_loss", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "loss_fn", ".", "pix", ")", "self", ".", "nce_loss", "=", "hydra", ".", "utils", ".", "instantiate", "(", "agent_cfg", ".", "loss_fn", ".", "nce", ",", "dim", ")" ]
[ 1554, 4 ]
[ 1581, 75 ]
python
en
['en', 'error', 'th']
False
Fwd._forward_dyn
(self, feats, vids, n_fwd_times, need_intermediate=False)
Args:
    feats: (BxT_histxNobjxDxH'xW')
    vids: (BxT_histxCxHxW) The video corresponding to the feats, some dyn
        models might use them.
    n_fwd_times: Number of times to run the fwd model on the last frames
    need_intermediate: If true, give all the intermediate features
Returns:
    all_preds: The predictions at each time step, in n_fwd_times
    all_pixs: The predictions in pixels. Note that not all dynamics models
        use pixels, so it might just give the last frame as output
    all_solved: The classification at each time step, for n_fwd_times
Args: feats: (BxT_histxNobjxDxH'xW') vids: (BxT_histxCxHxW) The video corresponding to the feats, some dyn models might use them. n_fwd_times: Number of times to run the fwd model on the last frames need_intermediate: If true, give all the intermediate features Returns: all_preds: The predictions at each time step, in n_fwd_times all_pixs: The predictions in pixels. Note that not all dynamics models use pixels, so it might just give the last frame as output all_solved: The classification at each time step, for n_fwd_times
def _forward_dyn(self, feats, vids, n_fwd_times, need_intermediate=False):
    """
    Args:
        feats: (BxT_histxNobjxDxH'xW')
        vids: (BxT_histxCxHxW) The video corresponding to the feats, some
            dyn models might use them.
        n_fwd_times: Number of times to run the fwd model on the last
            frames
        need_intermediate: If true, give all the intermediate features
    Returns:
        all_preds: The predictions at each time step, in n_fwd_times
        all_pixs: The predictions in pixels. Note that not all dynamics
            models use pixels, so it might just give the last frame as
            output
        all_solved: The classification at each time step, for n_fwd_times
    """
    all_preds = []
    all_pixs = []
    all_addl_losses = []
    if n_fwd_times == 0:
        return [all_preds, all_pixs, all_addl_losses]

    def run_fwd_append(feats, pixs):
        pred, pred_pix, addl_losses = self.dyn(feats, pixs)
        all_preds.append(pred)
        all_pixs.append(pred_pix)
        all_addl_losses.append(addl_losses)

    run_fwd_append(feats, vids)
    n_fwd_times_copy = n_fwd_times
    while n_fwd_times - 1 > 0:
        feats = torch.cat(
            [feats[:, 1:, ...],
             torch.unsqueeze(all_preds[-1], axis=1)],
            dim=1)
        vids = torch.cat(
            [vids[:, 1:, ...],
             torch.unsqueeze(all_pixs[-1], axis=1)],
            dim=1)
        run_fwd_append(feats, vids)
        n_fwd_times -= 1
    assert len(all_preds) == n_fwd_times_copy, (
        '%d %d' % (len(all_preds), n_fwd_times_copy))
    if not need_intermediate:
        all_preds = [all_preds[-1]]
        all_pixs = [all_pixs[-1]]
        all_addl_losses = [all_addl_losses[-1]]
    # Will compute solved or not later, after decode, in case the
    # classifier needs that information
    return all_preds, all_pixs, all_addl_losses
[ "def", "_forward_dyn", "(", "self", ",", "feats", ",", "vids", ",", "n_fwd_times", ",", "need_intermediate", "=", "False", ")", ":", "all_preds", "=", "[", "]", "all_pixs", "=", "[", "]", "all_addl_losses", "=", "[", "]", "if", "n_fwd_times", "==", "0", ":", "return", "[", "all_preds", ",", "all_pixs", ",", "all_addl_losses", "]", "def", "run_fwd_append", "(", "feats", ",", "pixs", ")", ":", "pred", ",", "pred_pix", ",", "addl_losses", "=", "self", ".", "dyn", "(", "feats", ",", "pixs", ")", "all_preds", ".", "append", "(", "pred", ")", "all_pixs", ".", "append", "(", "pred_pix", ")", "all_addl_losses", ".", "append", "(", "addl_losses", ")", "run_fwd_append", "(", "feats", ",", "vids", ")", "n_fwd_times_copy", "=", "n_fwd_times", "while", "n_fwd_times", "-", "1", ">", "0", ":", "feats", "=", "torch", ".", "cat", "(", "[", "feats", "[", ":", ",", "1", ":", ",", "...", "]", ",", "torch", ".", "unsqueeze", "(", "all_preds", "[", "-", "1", "]", ",", "axis", "=", "1", ")", "]", ",", "dim", "=", "1", ")", "vids", "=", "torch", ".", "cat", "(", "[", "vids", "[", ":", ",", "1", ":", ",", "...", "]", ",", "torch", ".", "unsqueeze", "(", "all_pixs", "[", "-", "1", "]", ",", "axis", "=", "1", ")", "]", ",", "dim", "=", "1", ")", "run_fwd_append", "(", "feats", ",", "vids", ")", "n_fwd_times", "-=", "1", "assert", "len", "(", "all_preds", ")", "==", "n_fwd_times_copy", ",", "(", "'%d %d'", "%", "(", "len", "(", "all_preds", ")", ",", "n_fwd_times_copy", ")", ")", "if", "not", "need_intermediate", ":", "all_preds", "=", "[", "all_preds", "[", "-", "1", "]", "]", "all_pixs", "=", "[", "all_pixs", "[", "-", "1", "]", "]", "all_addl_losses", "=", "[", "all_addl_losses", "[", "-", "1", "]", "]", "# Will compute solved or not later, after decode, in case the classifier", "# needs that information", "return", "all_preds", ",", "all_pixs", ",", "all_addl_losses" ]
[ 1590, 4 ]
[ 1637, 51 ]
python
en
['en', 'error', 'th']
False
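A minimal sketch of the autoregressive rollout in _forward_dyn above: each step drops the oldest history entry and appends the newest prediction, keeping a fixed-length window (dyn_step is a placeholder for self.dyn, not from the source):

import torch

def dyn_step(feats):
    return feats[:, -1] + 1.0  # placeholder one-step predictor

feats = torch.zeros(2, 3, 8)   # (B, n_hist_frames, D)
all_preds = []
for _ in range(4):             # n_fwd_times = 4
    pred = dyn_step(feats)
    all_preds.append(pred)
    feats = torch.cat([feats[:, 1:], pred.unsqueeze(1)], dim=1)
assert len(all_preds) == 4 and feats.shape == (2, 3, 8)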
Fwd._slice_for_dyn
(self, features_batched, n_hist_frames, nslices=-1)
Args:
    features_batched: BxTx.... can deal with any following dimensions,
        typically it is (BxTxNobjxDxH'xW')
    n_hist_frames (int): Number of frames to use as history
    nslices (int): If -1, make as many slices of the training data as
        possible. If 1, keep only the first one. (1 used when training
        classifier on top, which should always see videos from the start)
Returns:
    B'x n_hist_frames x ... (B'x n_hist_frames x Nobj x D x H' x W')
Args: features_batched: BxTx.... can deal with any following dimensions, typically it is (BxTxNobjxDxH'xW') n_hist_frames (int): Number of frames to use as history nslices (int): If -1, make as many slices of the training data as possible. If 1, keep only the first one. (1 used when training classifier on top, which should always see videos from the start)
def _slice_for_dyn(self, features_batched, n_hist_frames, nslices=-1):
    """
    Args:
        features_batched: BxTx.... can deal with any following dimensions,
            typically it is (BxTxNobjxDxH'xW')
        n_hist_frames (int): Number of frames to use as history
        nslices (int): If -1, make as many slices of the training data as
            possible. If 1, keep only the first one. (1 used when training
            classifier on top, which should always see videos from the
            start)
    Returns:
        B'x n_hist_frames x ... (B'x n_hist_frames x Nobj x D x H' x W')
    """
    clip_hist = []
    assert features_batched.shape[1] >= n_hist_frames
    for i in range((features_batched.shape[1] - n_hist_frames + 1)):
        if nslices > 0 and i >= nslices:
            break
        clip_hist.append(features_batched[:, i:i + n_hist_frames, ...])
    clip_hist = torch.cat(clip_hist, dim=0)
    return clip_hist
[ "def", "_slice_for_dyn", "(", "self", ",", "features_batched", ",", "n_hist_frames", ",", "nslices", "=", "-", "1", ")", ":", "clip_hist", "=", "[", "]", "assert", "features_batched", ".", "shape", "[", "1", "]", ">=", "n_hist_frames", "for", "i", "in", "range", "(", "(", "features_batched", ".", "shape", "[", "1", "]", "-", "n_hist_frames", "+", "1", ")", ")", ":", "if", "nslices", ">", "0", "and", "i", ">=", "nslices", ":", "break", "clip_hist", ".", "append", "(", "features_batched", "[", ":", ",", "i", ":", "i", "+", "n_hist_frames", ",", "...", "]", ")", "clip_hist", "=", "torch", ".", "cat", "(", "clip_hist", ",", "dim", "=", "0", ")", "return", "clip_hist" ]
[ 1639, 4 ]
[ 1660, 24 ]
python
en
['en', 'error', 'th']
False
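A worked example of the slicing above: with 5 frames available and n_hist_frames=3, three overlapping windows (frames 0-2, 1-3, 2-4) are stacked on the batch axis.

import torch

B, T, D = 2, 5, 8
features_batched = torch.randn(B, T, D)
n_hist_frames = 3
windows = [features_batched[:, i:i + n_hist_frames]
           for i in range(T - n_hist_frames + 1)]
clip_hist = torch.cat(windows, dim=0)
assert clip_hist.shape == (B * 3, n_hist_frames, D)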
Fwd._forward_dec
(self, feats, pixels)
Args:
    feats: List of features (BxD) from the dynamics prediction stage, one
        for each time step predicted.
    pixels: List of corresponding pixels from the dynamics model. The dyn
        model may or may not actually generate new pixels.
Args: feats: List of features (BxD) from the dynamics prediction stage, one for each time step predicted. pixels: List of corresponding pixels from the dynamics model. The dyn model may or may not actually generate new pixels.
def _forward_dec(self, feats, pixels):
    """
    Args:
        feats: List of features (BxD) from the dynamics prediction stage,
            one for each time step predicted.
        pixels: List of corresponding pixels from the dynamics model. The
            dyn model may or may not actually generate new pixels.
    """
    return [self.dec(feat, pix) for feat, pix in zip(feats, pixels)]
[ "def", "_forward_dec", "(", "self", ",", "feats", ",", "pixels", ")", ":", "return", "[", "self", ".", "dec", "(", "feat", ",", "pix", ")", "for", "feat", ",", "pix", "in", "zip", "(", "feats", ",", "pixels", ")", "]" ]
[ 1662, 4 ]
[ 1670, 72 ]
python
en
['en', 'error', 'th']
False
Fwd.cswm_loss
(self, pred, gt, hinge=1.0)
The energy based contrastive loss.

Args:
    pred (BxNobjxDxH'xW')
    gt (BxNobjxDxH'xW')
From https://github.com/tkipf/c-swm/blob/master/modules.py#L94
The energy based contrastive loss.
def cswm_loss(self, pred, gt, hinge=1.0):
    """
    The energy based contrastive loss.
    Args:
        pred (BxNobjxDxH'xW')
        gt (BxNobjxDxH'xW')
    From https://github.com/tkipf/c-swm/blob/master/modules.py#L94
    """
    pred = pred.view(pred.shape[:2] + (-1, ))
    gt = gt.view(gt.shape[:2] + (-1, ))
    batch_size = gt.size(0)
    perm = np.random.permutation(batch_size)
    neg = gt[perm]

    def energy(pred, gt, sigma=0.5):
        """Energy function based on normalized squared L2 norm.
        Args:
            pred (B, Nobj, D')
            gt (B, Nobj, D')
        """
        norm = 0.5 / (sigma**2)
        diff = pred - gt
        return norm * diff.pow(2).sum(2).mean(1)

    pos_loss = energy(pred, gt)
    zeros = torch.zeros_like(pos_loss)
    pos_loss = pos_loss.mean()
    neg_loss = torch.max(zeros, hinge - energy(pred, neg)).mean()
    return pos_loss + neg_loss
[ "def", "cswm_loss", "(", "self", ",", "pred", ",", "gt", ",", "hinge", "=", "1.0", ")", ":", "pred", "=", "pred", ".", "view", "(", "pred", ".", "shape", "[", ":", "2", "]", "+", "(", "-", "1", ",", ")", ")", "gt", "=", "gt", ".", "view", "(", "gt", ".", "shape", "[", ":", "2", "]", "+", "(", "-", "1", ",", ")", ")", "batch_size", "=", "gt", ".", "size", "(", "0", ")", "perm", "=", "np", ".", "random", ".", "permutation", "(", "batch_size", ")", "neg", "=", "gt", "[", "perm", "]", "def", "energy", "(", "pred", ",", "gt", ",", "sigma", "=", "0.5", ")", ":", "\"\"\"Energy function based on normalized squared L2 norm.\n Args:\n pred (B, Nobj, D')\n gt (B, Nobj, D')\n \"\"\"", "norm", "=", "0.5", "/", "(", "sigma", "**", "2", ")", "diff", "=", "pred", "-", "gt", "return", "norm", "*", "diff", ".", "pow", "(", "2", ")", ".", "sum", "(", "2", ")", ".", "mean", "(", "1", ")", "pos_loss", "=", "energy", "(", "pred", ",", "gt", ")", "zeros", "=", "torch", ".", "zeros_like", "(", "pos_loss", ")", "pos_loss", "=", "pos_loss", ".", "mean", "(", ")", "neg_loss", "=", "torch", ".", "max", "(", "zeros", ",", "hinge", "-", "energy", "(", "pred", ",", "neg", ")", ")", ".", "mean", "(", ")", "return", "pos_loss", "+", "neg_loss" ]
[ 1673, 4 ]
[ 1701, 34 ]
python
en
['en', 'error', 'th']
False
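A standalone sketch of the hinge energy above on flattened features (torch.clamp is used as an equivalent of torch.max against zeros; sizes and sigma are illustrative):

import torch

def energy(a, b, sigma=0.5):
    # Normalized squared L2 norm: sum over feature dim, mean over objects
    return (0.5 / sigma**2) * (a - b).pow(2).sum(2).mean(1)

pred = torch.randn(4, 2, 16)               # (B, Nobj, D')
gt = pred + 0.01 * torch.randn_like(pred)  # near-positive targets
neg = gt[torch.randperm(4)]                # shuffled in-batch negatives
loss = energy(pred, gt).mean() + torch.clamp(1.0 - energy(pred, neg), min=0).mean()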
Fwd.autoencoder_loss
(self, pix, latent, autoenc_loss_ratio)
Runs a random portion of the actual frames through the decoder to incur a
loss to encourage the intermediate representation to learn a good
autoencoder as well. Random fraction only for compute reasons. Ideally
would run every frame (ratio = 1)

Args:
    pix (B, T, H, W): Actual pixels of the input frames
    latent (B, T, Nobj, D, H', W'): Latent representation of the input
        frames
    autoenc_loss_ratio (float): What percentage of the input frames to run
        it on. Only for compute reasons, ideally run it on all.
Returns:
    loss {'autoenc_pix': (1,) <float>} for the loss
Runs a random portion of the actual frames through the decoder to incur a loss to encourage the intermediate representation to learn a good autoencoder as well.
def autoencoder_loss(self, pix, latent, autoenc_loss_ratio):
    """
    Runs a random portion of the actual frames through the decoder to
    incur a loss to encourage the intermediate representation to learn a
    good autoencoder as well. Random fraction only for compute reasons.
    Ideally would run every frame (ratio = 1)
    Args:
        pix (B, T, H, W): Actual pixels of the input frames
        latent (B, T, Nobj, D, H', W'): Latent representation of the input
            frames
        autoenc_loss_ratio (float): What percentage of the input frames to
            run it on. Only for compute reasons, ideally run it on all.
    Returns:
        loss {'autoenc_pix': (1,) <float>} for the loss
    """
    # Flatten the Batch and time dimension to get all the frames
    pix_flat = torch.flatten(pix, 0, 1)
    latent_flat = torch.flatten(latent, 0, 1)
    # Select a subset of the frames to run the loss on
    assert pix_flat.shape[0] == latent_flat.shape[0]
    idx = np.arange(pix_flat.shape[0])
    np.random.shuffle(idx)
    sel_cnt = int(autoenc_loss_ratio * len(idx))
    idx_sel = np.sort(idx[:sel_cnt])
    pix_flat_sel = pix_flat[idx_sel, ...]
    latent_flat_sel = latent_flat[idx_sel, ...]
    # Generate the pixels for the latent, and incur loss
    pred_flat_sel = combine_obj_pixels(self.dec(latent_flat_sel, None), 1)
    loss = self.pix_loss(pred_flat_sel, pix_flat_sel).unsqueeze(0)
    return {'autoenc_pix': loss}
[ "def", "autoencoder_loss", "(", "self", ",", "pix", ",", "latent", ",", "autoenc_loss_ratio", ")", ":", "# Flatten the Batch and time dimension to get all the frames", "pix_flat", "=", "torch", ".", "flatten", "(", "pix", ",", "0", ",", "1", ")", "latent_flat", "=", "torch", ".", "flatten", "(", "latent", ",", "0", ",", "1", ")", "# Select a subset of the frames to run the loss on", "assert", "pix_flat", ".", "shape", "[", "0", "]", "==", "latent_flat", ".", "shape", "[", "0", "]", "idx", "=", "np", ".", "arange", "(", "pix_flat", ".", "shape", "[", "0", "]", ")", "np", ".", "random", ".", "shuffle", "(", "idx", ")", "sel_cnt", "=", "int", "(", "autoenc_loss_ratio", "*", "len", "(", "idx", ")", ")", "idx_sel", "=", "np", ".", "sort", "(", "idx", "[", ":", "sel_cnt", "]", ")", "pix_flat_sel", "=", "pix_flat", "[", "idx_sel", ",", "...", "]", "latent_flat_sel", "=", "latent_flat", "[", "idx_sel", ",", "...", "]", "# Generate the pixels for the latent, and incur loss", "pred_flat_sel", "=", "combine_obj_pixels", "(", "self", ".", "dec", "(", "latent_flat_sel", ",", "None", ")", ",", "1", ")", "loss", "=", "self", ".", "pix_loss", "(", "pred_flat_sel", ",", "pix_flat_sel", ")", ".", "unsqueeze", "(", "0", ")", "return", "{", "'autoenc_pix'", ":", "loss", "}" ]
[ 1708, 4 ]
[ 1737, 36 ]
python
en
['en', 'error', 'th']
False
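A sketch of the random frame subsampling above: with 16 flattened frames and autoenc_loss_ratio=0.25, a random quarter is selected and the indices are sorted before indexing.

import numpy as np

idx = np.arange(16)
np.random.shuffle(idx)
idx_sel = np.sort(idx[:int(0.25 * len(idx))])
assert len(idx_sel) == 4 and np.all(np.diff(idx_sel) > 0)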
Fwd.solved_or_not_loss
(self, clip_preds_solved, vid_is_solved)
Repeat the is_solved as many times as the batch was repeated to get the
class label at each forward prediction

Args:
    clip_preds_solved (B',)
    vid_is_solved (B,)
    B and B' might be different but B' must be a multiple of B, since it
    happens when num_slices > 1
Returns:
    loss {'ce': (1,) <float>} for the loss
Repeat the is_solved as many times as the batch was repeated to get the class label at each forward prediction
def solved_or_not_loss(self, clip_preds_solved, vid_is_solved):
    """
    Repeat the is_solved as many times as the batch was repeated to get
    the class label at each forward prediction
    Args:
        clip_preds_solved (B',)
        vid_is_solved (B,)
        B and B' might be different but B' must be a multiple of B, since
        it happens when num_slices > 1
    Returns:
        loss {'ce': (1,) <float>} for the loss
    """
    assert clip_preds_solved.shape[0] % vid_is_solved.shape[0] == 0
    return {
        'ce':
        self.ce_loss(
            clip_preds_solved,
            vid_is_solved.repeat((clip_preds_solved.shape[0] //
                                  vid_is_solved.shape[0], ))).unsqueeze(0)
    }
[ "def", "solved_or_not_loss", "(", "self", ",", "clip_preds_solved", ",", "vid_is_solved", ")", ":", "assert", "clip_preds_solved", ".", "shape", "[", "0", "]", "%", "vid_is_solved", ".", "shape", "[", "0", "]", "==", "0", "return", "{", "'ce'", ":", "self", ".", "ce_loss", "(", "clip_preds_solved", ",", "vid_is_solved", ".", "repeat", "(", "(", "clip_preds_solved", ".", "shape", "[", "0", "]", "//", "vid_is_solved", ".", "shape", "[", "0", "]", ",", ")", ")", ")", ".", "unsqueeze", "(", "0", ")", "}" ]
[ 1739, 4 ]
[ 1758, 9 ]
python
en
['en', 'error', 'th']
False
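A sketch of the label tiling above: with three slices per video, the (B,) labels are repeated to match the (B',) predictions.

import torch

vid_is_solved = torch.tensor([1.0, 0.0])  # (B,) with B = 2
num_slices = 3                            # so B' = 6
labels = vid_is_solved.repeat(num_slices)
assert labels.tolist() == [1.0, 0.0, 1.0, 0.0, 1.0, 0.0]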
Fwd._compute_losses
(self, clip_pred, clip_pred_pix, vid_feat, vid, n_hist_frames, n_fwd_times)
Compute all losses possible.
Compute all losses possible.
def _compute_losses(self, clip_pred, clip_pred_pix, vid_feat, vid,
                    n_hist_frames, n_fwd_times):
    """
    Compute all losses possible.
    """
    dummy_loss = torch.Tensor([-1]).to(clip_pred.device)
    losses = {}
    # NCE and pixel loss
    # find the GT for each clip, note that all predictions may not have a GT
    # since the last n_hist_frames for a video will make a prediction that
    # goes out of the list of frames that were extracted for that video.
    feat_preds = []
    feat_gt = []
    pix_preds = []
    pix_gt = []
    batch_size = vid_feat.shape[0]
    gt_max_time = vid_feat.shape[1]
    # Max slices that could have been made of the data, to use all of the
    # training clip
    max_slices_with_gt = gt_max_time - n_hist_frames - n_fwd_times + 1
    num_slices = clip_pred.shape[0] // batch_size
    for i in range(min(max_slices_with_gt, num_slices)):
        corr_pred = clip_pred[i * batch_size:(i + 1) * batch_size, ...]
        # Get the corresponding GT predictions for this pred
        corr_gt = vid_feat[:, i + n_hist_frames + n_fwd_times - 1]
        assert corr_gt.shape == corr_pred.shape
        feat_preds.append(corr_pred)
        feat_gt.append(corr_gt)
        # Same thing for pix
        if clip_pred_pix is not None:
            corr_pix_pred = clip_pred_pix[i * vid_feat.shape[0]:(i + 1) *
                                          vid_feat.shape[0], ...]
            corr_pix_gt = vid[:, i + n_hist_frames + n_fwd_times - 1]
            pix_preds.append(corr_pix_pred)
            pix_gt.append(corr_pix_gt)
    if len(feat_gt) > 0:
        # Keep a batch dimension to the loss, since it will be run over
        # multiple GPUs
        feat_preds = torch.cat(feat_preds)
        feat_gt = torch.cat(feat_gt)
        losses['nce'] = self.nce_loss(feat_preds, feat_gt).unsqueeze(0)
        losses['cswm'] = self.cswm_loss(feat_preds, feat_gt).unsqueeze(0)
    else:
        losses['nce'] = dummy_loss
        losses['cswm'] = dummy_loss
    # Reconstruction loss
    if len(pix_gt) > 0:
        losses['pix'] = self.pix_loss(torch.cat(pix_preds),
                                      torch.cat(pix_gt)).unsqueeze(0)
    else:
        losses['pix'] = dummy_loss
    return losses
[ "def", "_compute_losses", "(", "self", ",", "clip_pred", ",", "clip_pred_pix", ",", "vid_feat", ",", "vid", ",", "n_hist_frames", ",", "n_fwd_times", ")", ":", "dummy_loss", "=", "torch", ".", "Tensor", "(", "[", "-", "1", "]", ")", ".", "to", "(", "clip_pred", ".", "device", ")", "losses", "=", "{", "}", "# NCE and pixel loss", "# find the GT for each clip, note that all predictions may not have a GT", "# since the last n_hist_frames for a video will make a prediction that", "# goes out of the list of frames that were extracted for that video.", "feat_preds", "=", "[", "]", "feat_gt", "=", "[", "]", "pix_preds", "=", "[", "]", "pix_gt", "=", "[", "]", "batch_size", "=", "vid_feat", ".", "shape", "[", "0", "]", "gt_max_time", "=", "vid_feat", ".", "shape", "[", "1", "]", "# Max slices that could have been made of the data, to use all of the", "# training clip", "max_slices_with_gt", "=", "gt_max_time", "-", "n_hist_frames", "-", "n_fwd_times", "+", "1", "num_slices", "=", "clip_pred", ".", "shape", "[", "0", "]", "//", "batch_size", "for", "i", "in", "range", "(", "min", "(", "max_slices_with_gt", ",", "num_slices", ")", ")", ":", "corr_pred", "=", "clip_pred", "[", "i", "*", "batch_size", ":", "(", "i", "+", "1", ")", "*", "batch_size", ",", "...", "]", "# Get the corresponding GT predictions for this pred", "corr_gt", "=", "vid_feat", "[", ":", ",", "i", "+", "n_hist_frames", "+", "n_fwd_times", "-", "1", "]", "assert", "corr_gt", ".", "shape", "==", "corr_pred", ".", "shape", "feat_preds", ".", "append", "(", "corr_pred", ")", "feat_gt", ".", "append", "(", "corr_gt", ")", "# Same thing for pix", "if", "clip_pred_pix", "is", "not", "None", ":", "corr_pix_pred", "=", "clip_pred_pix", "[", "i", "*", "vid_feat", ".", "shape", "[", "0", "]", ":", "(", "i", "+", "1", ")", "*", "vid_feat", ".", "shape", "[", "0", "]", ",", "...", "]", "corr_pix_gt", "=", "vid", "[", ":", ",", "i", "+", "n_hist_frames", "+", "n_fwd_times", "-", "1", "]", "pix_preds", ".", "append", "(", "corr_pix_pred", ")", "pix_gt", ".", "append", "(", "corr_pix_gt", ")", "if", "len", "(", "feat_gt", ")", ">", "0", ":", "# Keep a batch dimension to the loss, since it will be run over", "# multiple GPUs", "feat_preds", "=", "torch", ".", "cat", "(", "feat_preds", ")", "feat_gt", "=", "torch", ".", "cat", "(", "feat_gt", ")", "losses", "[", "'nce'", "]", "=", "self", ".", "nce_loss", "(", "feat_preds", ",", "feat_gt", ")", ".", "unsqueeze", "(", "0", ")", "losses", "[", "'cswm'", "]", "=", "self", ".", "cswm_loss", "(", "feat_preds", ",", "feat_gt", ")", ".", "unsqueeze", "(", "0", ")", "else", ":", "losses", "[", "'nce'", "]", "=", "dummy_loss", "losses", "[", "'cswm'", "]", "=", "dummy_loss", "# Reconstruction loss", "if", "len", "(", "pix_gt", ")", ">", "0", ":", "losses", "[", "'pix'", "]", "=", "self", ".", "pix_loss", "(", "torch", ".", "cat", "(", "pix_preds", ")", ",", "torch", ".", "cat", "(", "pix_gt", ")", ")", ".", "unsqueeze", "(", "0", ")", "else", ":", "losses", "[", "'pix'", "]", "=", "dummy_loss", "return", "losses" ]
[ 1762, 4 ]
[ 1814, 21 ]
python
en
['en', 'error', 'th']
False
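A worked example of the ground-truth bookkeeping above (numbers are illustrative): slice i predicts frame i + n_hist_frames + n_fwd_times - 1, so only slices whose target frame was actually extracted incur a loss.

gt_max_time, n_hist_frames, n_fwd_times = 10, 3, 2
max_slices_with_gt = gt_max_time - n_hist_frames - n_fwd_times + 1
assert max_slices_with_gt == 6  # targets are frames 4..9 of the 10 extracted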
Fwd._cls
(self, feat_hist, pix_hist, feat_preds, pix_preds)
Wrapper around the classifier, collates all the input frames/features and
predicted future frames/features. The images, features are already summed
over the objects

Args:
    feat_hist: (B, T, C, H', W')
    pix_hist: (B, T, 7, H, W)
    feat_preds [list of (B, C, H', W')] -- len = num predictions
    pix_preds [list of (B, 7, H, W)] -- len = num predictions
        The elements could be None, since not all models predict pixels
Returns:
    (B,) predicted scores for the clips
Wrapper around the classifier, collates all the input frames/features and predicted future frames/features.
def _cls(self, feat_hist, pix_hist, feat_preds, pix_preds):
    """
    Wrapper around the classifier, collates all the input frames/features
    and predicted future frames/features.
    The images, features are already summed over the objects
    Args:
        feat_hist: (B, T, C, H', W')
        pix_hist: (B, T, 7, H, W)
        feat_preds [list of (B, C, H', W')] -- len = num predictions
        pix_preds [list of (B, 7, H, W)] -- len = num predictions
            The elements could be None, since not all models predict pixels
    Returns:
        (B,) predicted scores for the clips
    """
    feats_combined = feat_hist
    if feat_preds is not None and len(feat_preds) > 0:
        feats_combined = torch.cat([feat_hist] +
                                   [el.unsqueeze(1) for el in feat_preds],
                                   dim=1)
    pix_combined = pix_hist
    if (pix_preds is not None and len(pix_preds) > 0
            and pix_preds[0] is not None):
        pix_combined = torch.cat([pix_combined] +
                                 [el.unsqueeze(1) for el in pix_preds],
                                 dim=1)
    # Sum over objs -- we want the classifier model to see everything
    # at the same time
    # They are summed now, but need the dimension still
    pix_combined = pix_combined.unsqueeze(2)
    feats_combined = feats_combined.unsqueeze(2)
    # If need to keep only a subset of the frames
    if self.nframes_to_cls > 0:
        pix_combined = pix_combined[:, :self.nframes_to_cls, ...]
        feats_combined = feats_combined[:, :self.nframes_to_cls, ...]
    feats_combined = self.spat_att(feats_combined)
    # Keep the last prediction, as that should ideally be the best
    # prediction of whether it was solved or not
    # torch.max was hard to optimize through
    return self.cls(feats_combined, pix_combined)[:, -1]
[ "def", "_cls", "(", "self", ",", "feat_hist", ",", "pix_hist", ",", "feat_preds", ",", "pix_preds", ")", ":", "feats_combined", "=", "feat_hist", "if", "feat_preds", "is", "not", "None", "and", "len", "(", "feat_preds", ")", ">", "0", ":", "feats_combined", "=", "torch", ".", "cat", "(", "[", "feat_hist", "]", "+", "[", "el", ".", "unsqueeze", "(", "1", ")", "for", "el", "in", "feat_preds", "]", ",", "dim", "=", "1", ")", "pix_combined", "=", "pix_hist", "if", "(", "pix_preds", "is", "not", "None", "and", "len", "(", "pix_preds", ")", ">", "0", "and", "pix_preds", "[", "0", "]", "is", "not", "None", ")", ":", "pix_combined", "=", "torch", ".", "cat", "(", "[", "pix_combined", "]", "+", "[", "el", ".", "unsqueeze", "(", "1", ")", "for", "el", "in", "pix_preds", "]", ",", "dim", "=", "1", ")", "# Sum over objs -- we want the classifier model to see everything", "# at the same time", "# They are summed now, but need the dimension still", "pix_combined", "=", "pix_combined", ".", "unsqueeze", "(", "2", ")", "feats_combined", "=", "feats_combined", ".", "unsqueeze", "(", "2", ")", "# If need to keep only a subset of the frames", "if", "self", ".", "nframes_to_cls", ">", "0", ":", "pix_combined", "=", "pix_combined", "[", ":", ",", ":", "self", ".", "nframes_to_cls", ",", "...", "]", "feats_combined", "=", "feats_combined", "[", ":", ",", ":", "self", ".", "nframes_to_cls", ",", "...", "]", "feats_combined", "=", "self", ".", "spat_att", "(", "feats_combined", ")", "# Keep the last prediction, as that should ideally be the best", "# prediction of whether it was solved or not", "# torch.max was hard to optimize through", "return", "self", ".", "cls", "(", "feats_combined", ",", "pix_combined", ")", "[", ":", ",", "-", "1", "]" ]
[ 1816, 4 ]
[ 1854, 60 ]
python
en
['en', 'error', 'th']
False
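The shape bookkeeping in _cls above is easy to check in isolation. Below is a minimal sketch (not from the repo; tensor sizes are hypothetical and PyTorch is assumed available) of the collation step: each per-step prediction of shape (B, C, H', W') gains a time axis before being concatenated onto the (B, T, C, H', W') history, and a singleton object axis is re-inserted afterwards.

import torch

B, T, C, Hf, Wf = 2, 3, 8, 4, 4  # hypothetical sizes
feat_hist = torch.randn(B, T, C, Hf, Wf)                    # history features
feat_preds = [torch.randn(B, C, Hf, Wf) for _ in range(2)]  # 2 rollout steps

# Give each prediction a time axis, then stack everything along time (dim=1)
feats_combined = torch.cat(
    [feat_hist] + [el.unsqueeze(1) for el in feat_preds], dim=1)
assert feats_combined.shape == (B, T + len(feat_preds), C, Hf, Wf)

# Objects were summed out already, but the classifier still expects the axis
feats_combined = feats_combined.unsqueeze(2)
assert feats_combined.shape == (B, T + len(feat_preds), 1, C, Hf, Wf)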
Fwd.forward
(self, vid, vid_is_solved, n_hist_frames=3, n_fwd_times=1, n_fwd_times_incur_loss=999999, run_decode=False, compute_losses=False, need_intermediate=False, autoenc_loss_ratio=0.0, nslices=-1)
Args: vid: (BxTxNobjxHxW) The input video vid_is_solved: (Bx1) Whether the video is solved in the end or not. Could be None at test time. n_hist_frames: (int) Number of frames to use as history for prediction n_fwd_times: (int) How many times to run the forward dynamics model n_fwd_times_incur_loss (int): Up to how many of these forward steps to incur loss on. run_decode: (bool) Decode the features into pixel output compute_losses: Should be set at train time. Will compute losses, whatever it can given the data (eg, if vid_is_solved is not passed to the function, it will not compute the CE loss). need_intermediate (bool): Set true if you want to run the dynamics model and need all the intermediate results. Else, will return a list with only 1 element, the final output. autoenc_loss_ratio (float between 0-1): Set to 1 to run auto-encoder style loss on all frames when run_decode is set. nslices (int): See the _slice_for_dyn fn Returns: all_preds: dict with the predicted features ('feats'), solved-or-not logits ('is_solved') and pixels ('pixels') all_losses: dict of losses
Args: vid: (BxTxNobjxHxW) The input video vid_is_solved: (Bx1) Whether the video is solved in the end or not. Could be None at test time. n_hist_frames: (int) Number of frames to use as history for prediction n_fwd_times: (int) How many times to run the forward dynamics model n_fwd_times_incur_loss (int): Up to how many of these forward steps to incur loss on. run_decode: (bool) Decode the features into pixel output compute_losses: Should be set at train time. Will compute losses, whatever it can given the data (eg, if vid_is_solved is not passed to the function, it will not compute the CE loss). need_intermediate (bool): Set true if you want to run the dynamics model and need all the intermediate results. Else, will return a list with only 1 element, the final output. autoenc_loss_ratio (float between 0-1): Set to 1 to run auto-encoder style loss on all frames when run_decode is set. nslices (int): See the _slice_for_dyn fn Returns: all_preds: dict with the predicted features ('feats'), solved-or-not logits ('is_solved') and pixels ('pixels') all_losses: dict of losses
def forward(self, vid, vid_is_solved, n_hist_frames=3, n_fwd_times=1, n_fwd_times_incur_loss=999999, run_decode=False, compute_losses=False, need_intermediate=False, autoenc_loss_ratio=0.0, nslices=-1): """ Args: vid: (BxTxNobjxHxW) The input video vid_is_solved: (Bx1) Whether the video is solved in the end or not. Could be None at test time. n_hist_frames: (int) Number of frames to use as history for prediction n_fwd_times: (int) How many times to run the forward dynamics model n_fwd_times_incur_loss (int): Up to how many of these forward steps to incur loss on. run_decode: (bool) Decode the features into pixel output compute_losses: Should be set at train time. Will compute losses, whatever it can given the data (eg, if vid_is_solved is not passed to the function, it will not compute the CE loss). need_intermediate (bool): Set true if you want to run the dynamics model and need all the intermediate results. Else, will return a list with only 1 element, the final output. autoenc_loss_ratio (float between 0-1): Set to 1 to run auto-encoder style loss on all frames when run_decode is set. nslices (int): See the _slice_for_dyn fn Returns: all_preds: dict with the predicted features ('feats'), solved-or-not logits ('is_solved') and pixels ('pixels') all_losses: dict of losses """ vid_preproc = self.preproc.preprocess_vid(vid) obj_feat = self.enc(vid_preproc) clip_hist = self._slice_for_dyn(obj_feat, n_hist_frames, nslices=nslices) vid_hist = self._slice_for_dyn(vid_preproc, n_hist_frames, nslices=nslices) assert clip_hist.shape[1] == n_hist_frames clip_hist = self.interactor(clip_hist) clip_preds, clip_preds_pix, clip_preds_addl_losses = self._forward_dyn( clip_hist, vid_hist, n_fwd_times, need_intermediate) if run_decode: clip_preds_pix = self._forward_dec(clip_preds, clip_preds_pix) else: clip_preds_pix = [None] * len(clip_preds) # Compute the solved or not, will only do for the ones asked for clip_preds_solved = self._cls( combine_obj_pixels(clip_hist, 2), combine_obj_pixels(vid_hist, 2), [combine_obj_pixels(el, 1) for el in clip_preds], [combine_obj_pixels(el, 1) for el in clip_preds_pix]) all_losses = [] clip_preds_pix_unpreproc_for_loss = [ self.preproc.unpreprocess_frame_for_loss(el) for el in clip_preds_pix ] if compute_losses: for i in range(min(len(clip_preds), n_fwd_times_incur_loss)): # Compute losses at each prediction step, if need_intermediate # is set. Else, it will only return a single output # (at the last prediction), and then we can only incur loss at # that point. if not need_intermediate: assert len(clip_preds) == 1 pred_id = -1 # Only loss on predicting the final rolled out obs this_fwd_times = n_fwd_times else: assert len(clip_preds) == n_fwd_times pred_id = i this_fwd_times = i + 1 all_losses.append( self._compute_losses( # For the loss, using only the last prediction (for now) clip_preds[pred_id], combine_obj_pixels( clip_preds_pix_unpreproc_for_loss[pred_id], 1), obj_feat, combine_obj_pixels(vid, 2), n_hist_frames, this_fwd_times)) all_losses = average_losses(all_losses) all_losses.update(average_losses(clip_preds_addl_losses)) all_losses.update( self.solved_or_not_loss(clip_preds_solved, vid_is_solved)) # Add losses on the provided frames if requested if run_decode and autoenc_loss_ratio > 0: all_losses.update( self.autoencoder_loss(combine_obj_pixels(vid, 2), obj_feat, autoenc_loss_ratio)) clip_preds_pix_unpreproc = [ combine_obj_pixels(self.preproc.unpreprocess_frame_after_loss(el), 1) for el in clip_preds_pix_unpreproc_for_loss ] all_preds = { 'feats': clip_preds, 'is_solved': clip_preds_solved, 'pixels': clip_preds_pix_unpreproc, } return all_preds, all_losses
[ "def", "forward", "(", "self", ",", "vid", ",", "vid_is_solved", ",", "n_hist_frames", "=", "3", ",", "n_fwd_times", "=", "1", ",", "n_fwd_times_incur_loss", "=", "999999", ",", "run_decode", "=", "False", ",", "compute_losses", "=", "False", ",", "need_intermediate", "=", "False", ",", "autoenc_loss_ratio", "=", "0.0", ",", "nslices", "=", "-", "1", ")", ":", "vid_preproc", "=", "self", ".", "preproc", ".", "preprocess_vid", "(", "vid", ")", "obj_feat", "=", "self", ".", "enc", "(", "vid_preproc", ")", "clip_hist", "=", "self", ".", "_slice_for_dyn", "(", "obj_feat", ",", "n_hist_frames", ",", "nslices", "=", "nslices", ")", "vid_hist", "=", "self", ".", "_slice_for_dyn", "(", "vid_preproc", ",", "n_hist_frames", ",", "nslices", "=", "nslices", ")", "assert", "clip_hist", ".", "shape", "[", "1", "]", "==", "n_hist_frames", "clip_hist", "=", "self", ".", "interactor", "(", "clip_hist", ")", "clip_preds", ",", "clip_preds_pix", ",", "clip_preds_addl_losses", "=", "self", ".", "_forward_dyn", "(", "clip_hist", ",", "vid_hist", ",", "n_fwd_times", ",", "need_intermediate", ")", "if", "run_decode", ":", "clip_preds_pix", "=", "self", ".", "_forward_dec", "(", "clip_preds", ",", "clip_preds_pix", ")", "else", ":", "clip_preds_pix", "=", "[", "None", "]", "*", "len", "(", "clip_preds", ")", "# Compute the solved or not, will only do for the ones asked for", "clip_preds_solved", "=", "self", ".", "_cls", "(", "combine_obj_pixels", "(", "clip_hist", ",", "2", ")", ",", "combine_obj_pixels", "(", "vid_hist", ",", "2", ")", ",", "[", "combine_obj_pixels", "(", "el", ",", "1", ")", "for", "el", "in", "clip_preds", "]", ",", "[", "combine_obj_pixels", "(", "el", ",", "1", ")", "for", "el", "in", "clip_preds_pix", "]", ")", "all_losses", "=", "[", "]", "clip_preds_pix_unpreproc_for_loss", "=", "[", "self", ".", "preproc", ".", "unpreprocess_frame_for_loss", "(", "el", ")", "for", "el", "in", "clip_preds_pix", "]", "if", "compute_losses", ":", "for", "i", "in", "range", "(", "min", "(", "len", "(", "clip_preds", ")", ",", "n_fwd_times_incur_loss", ")", ")", ":", "# Compute losses at each prediction step, if need_intermediate", "# is set. 
Else, it will only return a single output", "# (at the last prediction), and then we can only incur loss at", "# that point.", "if", "not", "need_intermediate", ":", "assert", "len", "(", "clip_preds", ")", "==", "1", "pred_id", "=", "-", "1", "# Only loss on predicting the final rolled out obs", "this_fwd_times", "=", "n_fwd_times", "else", ":", "assert", "len", "(", "clip_preds", ")", "==", "n_fwd_times", "pred_id", "=", "i", "this_fwd_times", "=", "i", "+", "1", "all_losses", ".", "append", "(", "self", ".", "_compute_losses", "(", "# For the loss, using only the last prediction (for now)", "clip_preds", "[", "pred_id", "]", ",", "combine_obj_pixels", "(", "clip_preds_pix_unpreproc_for_loss", "[", "pred_id", "]", ",", "1", ")", ",", "obj_feat", ",", "combine_obj_pixels", "(", "vid", ",", "2", ")", ",", "n_hist_frames", ",", "this_fwd_times", ")", ")", "all_losses", "=", "average_losses", "(", "all_losses", ")", "all_losses", ".", "update", "(", "average_losses", "(", "clip_preds_addl_losses", ")", ")", "all_losses", ".", "update", "(", "self", ".", "solved_or_not_loss", "(", "clip_preds_solved", ",", "vid_is_solved", ")", ")", "# Add losses on the provided frames if requested", "if", "run_decode", "and", "autoenc_loss_ratio", ">", "0", ":", "all_losses", ".", "update", "(", "self", ".", "autoencoder_loss", "(", "combine_obj_pixels", "(", "vid", ",", "2", ")", ",", "obj_feat", ",", "autoenc_loss_ratio", ")", ")", "clip_preds_pix_unpreproc", "=", "[", "combine_obj_pixels", "(", "self", ".", "preproc", ".", "unpreprocess_frame_after_loss", "(", "el", ")", ",", "1", ")", "for", "el", "in", "clip_preds_pix_unpreproc_for_loss", "]", "all_preds", "=", "{", "'feats'", ":", "clip_preds", ",", "'is_solved'", ":", "clip_preds_solved", ",", "'pixels'", ":", "clip_preds_pix_unpreproc", ",", "}", "return", "all_preds", ",", "all_losses" ]
[ 1856, 4 ]
[ 1959, 36 ]
python
en
['en', 'error', 'th']
False
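The indexing in forward's loss loop is subtle: which predictions exist, and which of them incur loss, depends on need_intermediate and n_fwd_times_incur_loss. The following standalone sketch (a hypothetical helper, not part of the model) reproduces just that selection logic.

def loss_steps(n_fwd_times, n_fwd_times_incur_loss, need_intermediate):
    # Mirror of the branch in forward(): with need_intermediate there is one
    # prediction per rollout step; otherwise only the final one exists.
    n_preds = n_fwd_times if need_intermediate else 1
    steps = []
    for i in range(min(n_preds, n_fwd_times_incur_loss)):
        if need_intermediate:
            steps.append((i, i + 1))         # (pred_id, this_fwd_times)
        else:
            steps.append((-1, n_fwd_times))  # loss only on the final rollout
    return steps

assert loss_steps(3, 999999, True) == [(0, 1), (1, 2), (2, 3)]
assert loss_steps(3, 2, True) == [(0, 1), (1, 2)]
assert loss_steps(3, 999999, False) == [(-1, 3)]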
block_output_tokens
(blocks, true_tokens)
blocks = the output from blockify true_tokens = a list of true tokens
blocks = the output from blockify true_tokens = a list of true tokens
def block_output_tokens(blocks, true_tokens): """ blocks = the output from blockify true_tokens = a list of true tokens """ assert len(blocks) == len(true_tokens) for k in range_(len(blocks)): block_tokens = re.split(r"\s+", blocks[k].text.strip()) assert block_tokens == true_tokens[k]
[ "def", "block_output_tokens", "(", "blocks", ",", "true_tokens", ")", ":", "assert", "len", "(", "blocks", ")", "==", "len", "(", "true_tokens", ")", "for", "k", "in", "range_", "(", "len", "(", "blocks", ")", ")", ":", "block_tokens", "=", "re", ".", "split", "(", "r\"\\s+\"", ",", "blocks", "[", "k", "]", ".", "text", ".", "strip", "(", ")", ")", "assert", "block_tokens", "==", "true_tokens", "[", "k", "]" ]
[ 21, 0 ]
[ 29, 45 ]
python
en
['en', 'error', 'th']
False
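Since block_output_tokens only touches len(blocks) and each block's .text, it can be exercised without running blockify at all. A self-contained sketch of the same check, with a hypothetical Block namedtuple standing in for the real block objects:

import re
from collections import namedtuple

# Hypothetical stand-in: only the .text attribute is consulted.
Block = namedtuple("Block", ["text"])

blocks = [Block("  some text\n here "), Block("more")]
true_tokens = [["some", "text", "here"], ["more"]]

assert len(blocks) == len(true_tokens)
for k in range(len(blocks)):
    # strip() first so the split never yields a leading/trailing '' token
    assert re.split(r"\s+", blocks[k].text.strip()) == true_tokens[k]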
TestBlockifier.test_lxml_error
(self)
tests the case where lxml raises an error during parsing also handles case where lxml returns None for the tree
tests the case where lxml raises an error during parsing
def test_lxml_error(self): """tests the case where lxml raises an error during parsing also handles case where lxml returns None for the tree""" # this raises an error in parsing with pytest.raises(BlockifyError): Blockifier.blockify("") # this returns None in lxml assert etree.fromstring("<!--", etree.HTMLParser(recover=True)) is None with pytest.raises(BlockifyError): Blockifier.blockify("<!--")
[ "def", "test_lxml_error", "(", "self", ")", ":", "# this raises an error in parsing", "with", "pytest", ".", "raises", "(", "BlockifyError", ")", ":", "Blockifier", ".", "blockify", "(", "\"\"", ")", "# this returns None in lxml", "assert", "etree", ".", "fromstring", "(", "\"<!--\"", ",", "etree", ".", "HTMLParser", "(", "recover", "=", "True", ")", ")", "is", "None", "with", "pytest", ".", "raises", "(", "BlockifyError", ")", ":", "Blockifier", ".", "blockify", "(", "\"<!--\"", ")" ]
[ 48, 4 ]
[ 58, 39 ]
python
en
['en', 'en', 'en']
True
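The test above leans on two different lxml failure modes: parsing "" makes lxml raise during parsing, while parsing "<!--" with a recovering HTMLParser silently returns None. A guard in the spirit of Blockifier.blockify might look like this sketch (BlockifyError here is a stand-in, not the library's actual class):

from lxml import etree

class BlockifyError(Exception):  # stand-in, not the library's actual class
    pass

def parse_or_raise(html):
    tree = etree.fromstring(html, etree.HTMLParser(recover=True))
    if tree is None:  # lxml recovers "<!--" into no tree at all
        raise BlockifyError("lxml produced no parse tree")
    return tree

parse_or_raise("<div>ok</div>")  # returns the parsed <html> element
# parse_or_raise("<!--")         # would raise BlockifyError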
TestBlockifier.test_very_simple
(self)
test_very_simple
test_very_simple
def test_very_simple(self): """test_very_simple""" s = """<div>some text <script> skip this </script> more text here </div>""" blocks = Blockifier.blockify(s) block_output_tokens(blocks, [['some', 'text', 'more', 'text', 'here']])
[ "def", "test_very_simple", "(", "self", ")", ":", "s", "=", "\"\"\"<div>some text\n <script> skip this </script>\n more text here\n </div>\"\"\"", "blocks", "=", "Blockifier", ".", "blockify", "(", "s", ")", "block_output_tokens", "(", "blocks", ",", "[", "[", "'some'", ",", "'text'", ",", "'more'", ",", "'text'", ",", "'here'", "]", "]", ")" ]
[ 60, 4 ]
[ 67, 79 ]
python
en
['en', 'en', 'en']
False
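What the expected tokens encode — script contents dropped, surrounding text kept, whitespace collapsed — can be re-derived with plain lxml. This sketch mirrors the assertion only; the real blockify does considerably more than strip script tags.

import re
from lxml import etree

s = """<div>some text
    <script> skip this </script>
    more text here
    </div>"""

tree = etree.fromstring(s, etree.HTMLParser(recover=True))
etree.strip_elements(tree, "script", with_tail=False)  # keep text after </script>
tokens = re.split(r"\s+", " ".join(tree.itertext()).strip())
assert tokens == ["some", "text", "more", "text", "here"]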