response (string, length 1 to 33.1k) | instruction (string, length 22 to 582k)
---|---|
Treebeard's path comparison logic can fail on certain locales such as sk_SK, which
sort numbers after letters. To avoid this, we explicitly set the collation for the
'path' column to the (non-locale-specific) 'C' collation.
See: https://groups.google.com/d/msg/wagtail/q0leyuCnYWI/I9uDvVlyBAAJ | def set_collection_path_collation(apps, schema_editor):
"""
Treebeard's path comparison logic can fail on certain locales such as sk_SK, which
sort numbers after letters. To avoid this, we explicitly set the collation for the
'path' column to the (non-locale-specific) 'C' collation.
See: https://groups.google.com/d/msg/wagtail/q0leyuCnYWI/I9uDvVlyBAAJ
"""
if schema_editor.connection.vendor == "postgresql":
schema_editor.execute(
"""
ALTER TABLE wagtailcore_collection ALTER COLUMN path TYPE VARCHAR(255) COLLATE "C"
"""
) |
Give the groups who currently manage all collections permission to manage root collections | def grant_instance_level_collection_management_permissions(apps, schema_editor):
"""
Give the groups who currently manage all collections permission to manage root collections
"""
Collection = apps.get_model("wagtailcore.Collection")
Group = apps.get_model("auth.Group")
GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")
Permission = apps.get_model("auth.Permission")
groups_w_permissions = Group.objects.filter(
permissions__content_type__app_label="wagtailcore",
permissions__content_type__model="collection",
permissions__codename__in=[
"add_collection",
"change_collection",
"delete_collection",
],
).values("id", "name", "permissions__id", "permissions__codename")
for root_collection in Collection.objects.filter(depth=1).all():
for row in groups_w_permissions:
GroupCollectionPermission.objects.create(
group_id=row["id"],
permission_id=row["permissions__id"],
collection_id=root_collection.id,
)
# Now remove the model-level permissions for collections
collection_permissions = Permission.objects.filter(
content_type__app_label="wagtailcore",
content_type__model="collection",
codename__in=["add_collection", "change_collection", "delete_collection"],
)
for perm in collection_permissions.all():
perm.group_set.clear() |
Give model-level permission to all groups who have that permission on the root collection | def revert_to_model_level_collection_management_permissions(apps, schema_editor):
"""
Give model-level permission to all groups who have that permission on the root collection
"""
Collection = apps.get_model("wagtailcore.Collection")
GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")
root_collections = Collection.objects.filter(depth=1).all()
group_collection_permissions = GroupCollectionPermission.objects.filter(
permission__content_type__app_label="wagtailcore",
permission__content_type__model="collection",
permission__codename__in=[
"add_collection",
"change_collection",
"delete_collection",
],
collection__in=root_collections,
).select_related("group", "permission")
for row in group_collection_permissions.all():
row.group.permissions.add(row.permission)
# Now delete the instance-level collection management permissions
group_collection_permissions.all().delete() |
Get dictionaries representing the model's field data.
This excludes many-to-many fields (which are handled by _copy_m2m_relations). | def _extract_field_data(source, exclude_fields=None):
"""
Get dictionaries representing the model's field data.
This excludes many-to-many fields (which are handled by _copy_m2m_relations).
"""
exclude_fields = exclude_fields or []
data_dict = {}
for field in source._meta.get_fields():
# Ignore explicitly excluded fields
if field.name in exclude_fields:
continue
# Ignore reverse relations
if field.auto_created:
continue
# Ignore reverse generic relations
if isinstance(field, GenericRelation):
continue
# Copy parental m2m relations
if field.many_to_many:
if isinstance(field, ParentalManyToManyField):
parental_field = getattr(source, field.name)
if hasattr(parental_field, "all"):
values = parental_field.all()
if values:
data_dict[field.name] = values
continue
# Ignore parent links (page_ptr)
if isinstance(field, models.OneToOneField) and field.remote_field.parent_link:
continue
if isinstance(field, models.ForeignKey):
# Use attname to copy the ID instead of retrieving the instance
# Note: We first need to set the field to None to unset any object
# that's there already; just setting _id on its own won't change the
# field until it's saved.
data_dict[field.name] = None
data_dict[field.attname] = getattr(source, field.attname)
else:
data_dict[field.name] = getattr(source, field.name)
return data_dict |
Copies non-ParentalManyToMany m2m relations | def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
"""
Copies non-ParentalManyToMany m2m relations
"""
update_attrs = update_attrs or {}
exclude_fields = exclude_fields or []
for field in source._meta.get_fields():
# Copy m2m relations. Ignore explicitly excluded fields, reverse relations, and Parental m2m fields.
if (
field.many_to_many
and field.name not in exclude_fields
and not field.auto_created
and not isinstance(field, ParentalManyToManyField)
):
try:
# Do not copy m2m links with a through model that has a ParentalKey to the model being copied - these will be copied as child objects
through_model_parental_links = [
field
for field in field.through._meta.get_fields()
if isinstance(field, ParentalKey)
and issubclass(source.__class__, field.related_model)
]
if through_model_parental_links:
continue
except AttributeError:
pass
if field.name in update_attrs:
value = update_attrs[field.name]
else:
value = getattr(source, field.name).all()
getattr(target, field.name).set(value) |
This function populates the "translation_key" and "locale" fields on model instances that were created
before wagtail-localize was added to the site.
This can be called from a data migration, or instead you could use the "bootstrap_translatable_models"
management command. | def bootstrap_translatable_model(model, locale):
"""
This function populates the "translation_key" and "locale" fields on model instances that were created
before wagtail-localize was added to the site.
This can be called from a data migration, or instead you could use the "bootstrap_translatable_models"
management command.
"""
for instance in (
model.objects.filter(translation_key__isnull=True).defer().iterator()
):
instance.translation_key = uuid.uuid4()
instance.locale = locale
instance.save(update_fields=["translation_key", "locale"]) |
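A hedged sketch of calling this from a data migration; the import path, the BlogPage model, and the migration names are assumptions rather than anything taken from the source above.
from django.db import migrations

from wagtail.models import bootstrap_translatable_model  # import path assumed


def forwards(apps, schema_editor):
    # Use historical models so the migration does not depend on current model code.
    Locale = apps.get_model("wagtailcore", "Locale")
    BlogPage = apps.get_model("blog", "BlogPage")  # hypothetical translatable model
    locale = Locale.objects.get(language_code="en")  # assumes an existing locale
    bootstrap_translatable_model(BlogPage, locale)


class Migration(migrations.Migration):
    dependencies = [("blog", "0002_add_translation_fields")]  # hypothetical dependency
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]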
Returns a list of all concrete models that inherit from TranslatableMixin.
By default, this only includes models that are direct children of TranslatableMixin;
to get all models, set the include_subclasses argument to True. | def get_translatable_models(include_subclasses=False):
"""
Returns a list of all concrete models that inherit from TranslatableMixin.
By default, this only includes models that are direct children of TranslatableMixin;
to get all models, set the include_subclasses argument to True.
"""
translatable_models = [
model
for model in apps.get_models()
if issubclass(model, TranslatableMixin) and not model._meta.abstract
]
if include_subclasses is False:
# Exclude models that inherit from another translatable model
root_translatable_models = set()
for model in translatable_models:
root_translatable_models.add(model.get_translation_model())
translatable_models = [
model for model in translatable_models if model in root_translatable_models
]
return translatable_models |
Return the wagtailcore.Site object for the given hostname and port. | def get_site_for_hostname(hostname, port):
"""Return the wagtailcore.Site object for the given hostname and port."""
Site = apps.get_model("wagtailcore.Site")
sites = list(
Site.objects.annotate(
match=Case(
# annotate the results by best choice descending
# put exact hostname+port match first
When(hostname=hostname, port=port, then=MATCH_HOSTNAME_PORT),
# then put hostname+default (better than just hostname or just default)
When(
hostname=hostname, is_default_site=True, then=MATCH_HOSTNAME_DEFAULT
),
# then match default with different hostname. there is only ever
# one default, so order it above (possibly multiple) hostname
# matches so we can use sites[0] below to access it
When(is_default_site=True, then=MATCH_DEFAULT),
# because of the filter below, if it's not the default then it's a hostname match
default=MATCH_HOSTNAME,
output_field=IntegerField(),
)
)
.filter(Q(hostname=hostname) | Q(is_default_site=True))
.order_by("match")
.select_related("root_page")
)
if sites:
# if there's a unique match or hostname (with port or default) match
if len(sites) == 1 or sites[0].match in (
MATCH_HOSTNAME_PORT,
MATCH_HOSTNAME_DEFAULT,
):
return sites[0]
# if there is a default match with a different hostname, see if
# there are many hostname matches. if only 1 then use that instead
# otherwise we use the default
if sites[0].match == MATCH_DEFAULT:
return sites[len(sites) == 2]
raise Site.DoesNotExist() |
Returns a list of all non-abstract Page model classes defined in this project. | def get_page_models():
"""
Returns a list of all non-abstract Page model classes defined in this project.
"""
return PAGE_MODEL_CLASSES.copy() |
Returns a queryset of all ContentType objects corresponding to Page model classes. | def get_page_content_types(include_base_page_type=True):
"""
Returns a queryset of all ContentType objects corresponding to Page model classes.
"""
models = get_page_models()
if not include_base_page_type:
models.remove(Page)
content_type_ids = [
ct.pk for ct in ContentType.objects.get_for_models(*models).values()
]
return ContentType.objects.filter(pk__in=content_type_ids).order_by("model") |
Returns the content type to use as a default for pages whose content type
has been deleted. | def get_default_page_content_type():
"""
Returns the content type to use as a default for pages whose content type
has been deleted.
"""
return ContentType.objects.get_for_model(Page) |
helper method to extract tag attributes, as a dict of un-escaped strings | def extract_attrs(attr_string: str) -> dict:
"""
helper method to extract tag attributes, as a dict of un-escaped strings
"""
attributes = {}
for name, val in FIND_ATTRS.findall(attr_string):
val = (
val.replace("<", "<")
.replace(">", ">")
.replace(""", '"')
.replace("&", "&")
)
attributes[name] = val
return attributes |
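For illustration, a hedged sketch of the behaviour when combined with the function above. FIND_ATTRS is a module-level regex that is not shown here, so the pattern below is an assumption.
import re

FIND_ATTRS = re.compile(r'([\w-]+)\="([^"]*)"')  # assumed pattern for name="value" pairs

extract_attrs('linktype="page" id="3" data-title="Fish &amp; chips"')
# -> {'linktype': 'page', 'id': '3', 'data-title': 'Fish & chips'}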
Expand database-representation HTML into proper HTML usable on front-end templates | def expand_db_html(html):
"""
Expand database-representation HTML into proper HTML usable on front-end templates
"""
rewriter = get_rewriter()
return rewriter(html) |
Return a plain text version of a rich text string, suitable for search indexing;
like Django's strip_tags, but ensures that whitespace is left between block elements
so that <p>hello</p><p>world</p> gives "hello world", not "helloworld". | def get_text_for_indexing(richtext):
"""
Return a plain text version of a rich text string, suitable for search indexing;
like Django's strip_tags, but ensures that whitespace is left between block elements
so that <p>hello</p><p>world</p> gives "hello world", not "helloworld".
"""
# insert space after </p>, </h1> - </h6>, </li> and </blockquote> tags
richtext = re.sub(
r"(</(p|h\d|li|blockquote)>)", r"\1 ", richtext, flags=re.IGNORECASE
)
# also insert space after <br /> and <hr />
richtext = re.sub(r"(<(br|hr)\s*/>)", r"\1 ", richtext, flags=re.IGNORECASE)
return unescape(strip_tags(richtext).strip()) |
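A small usage sketch of the behaviour described in the docstring; the import path is assumed.
from wagtail.rich_text import get_text_for_indexing  # import path assumed

text = get_text_for_indexing("<p>hello</p><p>world &amp; friends</p>")
# -> "hello world & friends" (block boundaries become spaces, entities are unescaped)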
Checks that each page model with search_fields includes the core Page search fields | def page_search_fields_check(app_configs, **kwargs):
"""Checks that each page model with search_fields includes the core Page search fields"""
from wagtail.models import Page, get_page_models
page_models = get_page_models()
errors = []
for cls in page_models:
# Don't check models where indexing has been explicitly disabled
if not cls.search_fields:
continue
# Only checks an initial subset of fields, as finding some missing is enough to show the warning
if not all(field in cls.search_fields for field in Page.search_fields[:10]):
errors.append(
Warning(
"Core Page fields missing in `search_fields`",
hint=" ".join(
[
"Ensure that {} extends the Page model search fields",
"`search_fields = Page.search_fields + [...]`",
]
).format(cls.__name__),
obj=cls,
id="wagtailsearch.W001",
)
)
return errors |
Has the same result as Python's reduce function, but performs the calculations in a different order.
This is important when the operator is constructing data structures such as search query classes.
This method will make the resulting data structures flatter, so operations that need to traverse
them don't end up crashing with recursion errors.
For example:
Python's builtin reduce() function will do the following calculation:
reduce(add, [1, 2, 3, 4, 5, 6, 7, 8])
(1 + (2 + (3 + (4 + (5 + (6 + (7 + 8)))))))
When using this with query classes, it would create a large data structure with a depth of 7
Whereas balanced_reduce will execute this like so:
balanced_reduce(add, [1, 2, 3, 4, 5, 6, 7, 8])
((1 + 2) + (3 + 4)) + ((5 + 6) + (7 + 8))
Which only has a depth of 2 | def balanced_reduce(operator, seq, initializer=NOT_SET):
"""
Has the same result as Python's reduce function, but performs the calculations in a different order.
This is important when the operator is constructing data structures such as search query classes.
This method will make the resulting data structures flatter, so operations that need to traverse
them don't end up crashing with recursion errors.
For example:
Python's builtin reduce() function will do the following calculation:
reduce(add, [1, 2, 3, 4, 5, 6, 7, 8])
(1 + (2 + (3 + (4 + (5 + (6 + (7 + 8)))))))
When using this with query classes, it would create a large data structure with a depth of 7
Whereas balanced_reduce will execute this like so:
balanced_reduce(add, [1, 2, 3, 4, 5, 6, 7, 8])
((1 + 2) + (3 + 4)) + ((5 + 6) + (7 + 8))
Which only has a depth of 2
"""
# Casting all iterables to list makes the implementation simpler
if not isinstance(seq, list):
seq = list(seq)
# Note, it needs to be possible to use None as an initial value
if initializer is not NOT_SET:
if len(seq) == 0:
return initializer
else:
return operator(initializer, balanced_reduce(operator, seq))
if len(seq) == 0:
raise TypeError("reduce() of empty sequence with no initial value")
elif len(seq) == 1:
return seq[0]
else:
break_point = len(seq) // 2
first_set = balanced_reduce(operator, seq[:break_point])
second_set = balanced_reduce(operator, seq[break_point:])
return operator(first_set, second_set) |
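A quick illustration of the depth difference described in the docstring; the import path is assumed.
from functools import reduce
from operator import add

from wagtail.search.utils import balanced_reduce  # import path assumed

nums = [1, 2, 3, 4, 5, 6, 7, 8]
assert reduce(add, nums) == balanced_reduce(add, nums) == 36
# For operators that build nested objects (e.g. combining SearchQuery instances),
# the balanced grouping keeps the resulting tree depth at ~log2(n) rather than n - 1.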
This takes a query string typed in by a user and extracts the following:
- Quoted terms (for phrase search)
- Filters
For example, the following query:
`hello "this is a phrase" live:true` would be parsed into:
filters: {'live': 'true'}
tokens: And([PlainText('hello'), Phrase('this is a phrase')]) | def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
"""
This takes a query string typed in by a user and extracts the following:
- Quoted terms (for phrase search)
- Filters
For example, the following query:
`hello "this is a phrase" live:true` would be parsed into:
filters: {'live': 'true'}
tokens: And([PlainText('hello'), Phrase('this is a phrase')])
"""
filters, query_string = separate_filters_from_query(query_string)
is_phrase = False
tokens = []
if '"' in query_string:
parts = query_string.split('"')
else:
parts = query_string.split("'")
for part in parts:
part = part.strip()
if part:
if is_phrase:
tokens.append(Phrase(part))
else:
tokens.append(
PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR)
)
is_phrase = not is_phrase
if tokens:
if operator == "or":
search_query = OR(tokens)
else:
search_query = AND(tokens)
else:
search_query = zero_terms
return filters, search_query |
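A usage sketch matching the docstring's own example; the import path is assumed.
from wagtail.search.utils import parse_query_string  # import path assumed

filters, query = parse_query_string('hello "this is a phrase" live:true')
# filters -> {'live': 'true'}
# query   -> And([PlainText('hello'), Phrase('this is a phrase')])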
Returns all descendants of a model, including the model itself. | def get_descendant_models(model):
"""
Returns all descendants of a model, including the model itself.
"""
descendant_models = {
other_model
for other_model in apps.get_models()
if issubclass(other_model, model)
}
descendant_models.add(model)
return descendant_models |
Returns content types ids for the ancestors of this model, excluding it. | def get_ancestors_content_types_pks(model):
"""
Returns content types ids for the ancestors of this model, excluding it.
"""
from django.contrib.contenttypes.models import ContentType
return [
ct.pk
for ct in ContentType.objects.get_for_models(
*model._meta.get_parent_list()
).values()
] |
Returns content types ids for the descendants of this model, including it. | def get_descendants_content_types_pks(model):
"""
Returns content types ids for the descendants of this model, including it.
"""
from django.contrib.contenttypes.models import ContentType
return [
ct.pk
for ct in ContentType.objects.get_for_models(
*get_descendant_models(model)
).values()
] |
This function finds the root model for any given model. The root model is
the highest concrete model that it descends from. If the model doesn't
descend from another concrete model, then the model is its own root model, so
it is returned.
Examples:
>>> get_model_root(wagtailcore.Page)
wagtailcore.Page
>>> get_model_root(myapp.HomePage)
wagtailcore.Page
>>> get_model_root(wagtailimages.Image)
wagtailimages.Image | def get_model_root(model):
"""
This function finds the root model for any given model. The root model is
the highest concrete model that it descends from. If the model doesn't
descend from another concrete model, then the model is its own root model, so
it is returned.
Examples:
>>> get_model_root(wagtailcore.Page)
wagtailcore.Page
>>> get_model_root(myapp.HomePage)
wagtailcore.Page
>>> get_model_root(wagtailimages.Image)
wagtailimages.Image
"""
if model._meta.parents:
parent_model = list(model._meta.parents.items())[0][0]
return get_model_root(parent_model)
return model |
There are two formats for the dotted_path.
One with the backend class (old) and one without (new)
eg:
old: wagtail.search.backends.elasticsearch.ElasticsearchSearchBackend
new: wagtail.search.backends.elasticsearch
If a new-style dotted path is specified, this function looks for
a backend class in the "SearchBackend" attribute. | def import_backend(dotted_path):
"""
There are two formats for the dotted_path.
One with the backend class (old) and one without (new)
eg:
old: wagtail.search.backends.elasticsearch.ElasticsearchSearchBackend
new: wagtail.search.backends.elasticsearch
If a new-style dotted path is specified, this function looks for
a backend class in the "SearchBackend" attribute.
"""
try:
# New
backend_module = import_module(dotted_path)
return backend_module.SearchBackend
except ImportError as e:
try:
# Old
return import_string(dotted_path)
except ImportError:
raise ImportError from e |
Returns the appropriate search backend for the current 'default' database system | def SearchBackend(params):
"""
Returns the appropriate search backend for the current 'default' database system
"""
if connection.vendor == "postgresql":
from .postgres.postgres import PostgresSearchBackend
return PostgresSearchBackend(params)
elif connection.vendor == "mysql":
from .mysql.mysql import MySQLSearchBackend
return MySQLSearchBackend(params)
elif connection.vendor == "sqlite":
global USE_SQLITE_FTS
if USE_SQLITE_FTS is None:
from .sqlite.utils import fts5_available, fts_table_exists
if not fts5_available():
USE_SQLITE_FTS = False
elif not fts_table_exists():
USE_SQLITE_FTS = False
warnings.warn(
"The installed SQLite library supports full-text search, but the table for storing "
"searchable content is missing. This probably means SQLite was upgraded after the "
"migration was applied. To enable full-text search, reapply wagtailsearch migration 0006 "
"or create the table manually."
)
else:
USE_SQLITE_FTS = True
if USE_SQLITE_FTS:
from .sqlite.sqlite import SQLiteSearchBackend
return SQLiteSearchBackend(params)
else:
from .fallback import DatabaseSearchBackend
return DatabaseSearchBackend(params)
else:
from .fallback import DatabaseSearchBackend
return DatabaseSearchBackend(params) |
Turns this query into a normalized version.
For example, And(Not(PlainText("Arepa")), PlainText("Crepe")) would be turned into AndNot(PlainText("Crepe"), PlainText("Arepa")): "Crepe AND NOT Arepa".
This is done because we need to get the NOT operator to the front of the query, so it can be used in the search, because the SQLite FTS5 module doesn't support the unary NOT operator. This means that, in order to support the NOT operator, we need to match against the non-negated version of the query, and then return everything that is not in the results of the non-negated query. | def normalize(search_query: SearchQuery) -> Tuple[SearchQuery]:
"""
Turns this query into a normalized version.
For example, And(Not(PlainText("Arepa")), PlainText("Crepe")) would be turned into AndNot(PlainText("Crepe"), PlainText("Arepa")): "Crepe AND NOT Arepa".
This is done because we need to get the NOT operator to the front of the query, so it can be used in the search, because the SQLite FTS5 module doesn't support the unary NOT operator. This means that, in order to support the NOT operator, we need to match against the non-negated version of the query, and then return everything that is not in the results of the non-negated query.
"""
if isinstance(search_query, Phrase):
return search_query # We can't normalize a Phrase.
if isinstance(search_query, PlainText):
return search_query # We can't normalize a PlainText.
if isinstance(search_query, And):
normalized_subqueries: List[SearchQuery] = [
normalize(subquery) for subquery in search_query.subqueries
] # This builds a list of normalized subqueries.
not_negated_subqueries = [
subquery
for subquery in normalized_subqueries
if not isinstance(subquery, Not)
] # All the non-negated subqueries.
not_negated_subqueries = [
subquery
for subquery in not_negated_subqueries
if not isinstance(subquery, MatchAll)
] # We can ignore all MatchAll SearchQueries here, because they are redundant.
negated_subqueries = [
subquery.subquery
for subquery in normalized_subqueries
if isinstance(subquery, Not)
]
if (
negated_subqueries == []
): # If there are no negated subqueries, return an And(), now without the redundant MatchAll subqueries.
return And(not_negated_subqueries)
for subquery in (
negated_subqueries
): # If there's a negated MatchAll subquery, then nothing will get matched.
if isinstance(subquery, MatchAll):
return Not(MatchAll())
return AndNot(And(not_negated_subqueries), Or(negated_subqueries))
if isinstance(search_query, Or):
normalized_subqueries: List[SearchQuery] = [
normalize(subquery) for subquery in search_query.subqueries
] # This builds a list of normalized subqueries.
negated_subqueries = [
subquery.subquery
for subquery in normalized_subqueries
if isinstance(subquery, Not)
]
if (
negated_subqueries == []
): # If there are no negated subqueries, return an Or().
return Or(normalized_subqueries)
for subquery in (
negated_subqueries
): # If there's a MatchAll subquery, then anything will get matched.
if isinstance(subquery, MatchAll):
return MatchAll()
not_negated_subqueries = [
subquery
for subquery in normalized_subqueries
if not isinstance(subquery, Not)
] # All the non-negated subqueries.
not_negated_subqueries = [
subquery
for subquery in not_negated_subqueries
if not isinstance(subquery, MatchAll)
] # We can ignore all MatchAll SearchQueries here, because they are redundant.
return AndNot(MatchAll(), And(negated_subqueries))
if isinstance(search_query, Not):
normalized = normalize(search_query.subquery)
return Not(normalized) # Normalize the subquery, then invert it.
if isinstance(search_query, MatchAll):
return search_query |
This takes a search backend and a list of models. By calling the
get_index_for_model method on the search backend, it groups the models into
the indices that they will be indexed into.
It returns an ordered mapping of indices to lists of models within each
index.
For example, Elasticsearch 2 requires all page models to be together, but
separate from other content types (eg, images and documents) to prevent
field mapping collisions:
>>> group_models_by_index(elasticsearch2_backend, [
... wagtailcore.Page,
... myapp.HomePage,
... myapp.StandardPage,
... wagtailimages.Image
... ])
{
<Index wagtailcore_page>: [wagtailcore.Page, myapp.HomePage, myapp.StandardPage],
<Index wagtailimages_image>: [wagtailimages.Image],
} | def group_models_by_index(backend, models):
"""
This takes a search backend and a list of models. By calling the
get_index_for_model method on the search backend, it groups the models into
the indices that they will be indexed into.
It returns an ordered mapping of indices to lists of models within each
index.
For example, Elasticsearch 2 requires all page models to be together, but
separate from other content types (eg, images and documents) to prevent
field mapping collisions:
>>> group_models_by_index(elasticsearch2_backend, [
... wagtailcore.Page,
... myapp.HomePage,
... myapp.StandardPage,
... wagtailimages.Image
... ])
{
<Index wagtailcore_page>: [wagtailcore.Page, myapp.HomePage, myapp.StandardPage],
<Index wagtailimages_image>: [wagtailimages.Image],
}
"""
indices = {}
models_by_index = collections.OrderedDict()
for model in models:
index = backend.get_index_for_model(model)
if index:
indices.setdefault(index.name, index)
models_by_index.setdefault(index.name, [])
models_by_index[index.name].append(model)
return collections.OrderedDict(
[
(indices[index_name], index_models)
for index_name, index_models in models_by_index.items()
]
) |
A context manager to allow testing of different search_fields configurations
without permanently changing the models' search_fields. | def patch_search_fields(model, new_search_fields):
"""
A context manager to allow testing of different search_fields configurations
without permanently changing the models' search_fields.
"""
old_search_fields = model.search_fields
model.search_fields = new_search_fields
yield
model.search_fields = old_search_fields |
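The yield above suggests this is used as a context manager (presumably via contextlib.contextmanager). A hedged usage sketch in a test; Book and its fields are assumptions.
from wagtail.search import index

# Book is a hypothetical indexed model used only for illustration.
with patch_search_fields(Book, Book.search_fields + [index.SearchField("subtitle")]):
    # Inside the block the extra field is active...
    assert any(f.field_name == "subtitle" for f in Book.search_fields)
# ...and the original Book.search_fields is restored once the block exits.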
Retrieve the global list of menu items for the snippet action menu,
which may then be customised on a per-request basis | def get_base_snippet_action_menu_items(model):
"""
Retrieve the global list of menu items for the snippet action menu,
which may then be customised on a per-request basis
"""
menu_items = [
SaveMenuItem(order=0),
DeleteMenuItem(order=10),
]
if issubclass(model, DraftStateMixin):
menu_items += [
UnpublishMenuItem(order=20),
PublishMenuItem(order=30),
]
if issubclass(model, WorkflowMixin):
menu_items += [
CancelWorkflowMenuItem(order=40),
RestartWorkflowMenuItem(order=50),
SubmitForModerationMenuItem(order=60),
]
if issubclass(model, LockableMixin):
menu_items.append(LockedMenuItem(order=10000))
for hook in hooks.get_hooks("register_snippet_action_menu_item"):
action_menu_item = hook(model)
if action_menu_item:
menu_items.append(action_menu_item)
return menu_items |
Called from WagtailSnippetsAppConfig.ready(), at which point we can be sure all models
have been loaded and register_snippet can safely construct viewsets. | def register_deferred_snippets():
"""
Called from WagtailSnippetsAppConfig.ready(), at which point we can be sure all models
have been loaded and register_snippet can safely construct viewsets.
"""
global DEFER_REGISTRATION
DEFER_REGISTRATION = False
for registerable, viewset in DEFERRED_REGISTRATIONS:
_register_snippet_immediately(registerable, viewset) |
true if user has 'add', 'change' or 'delete' permission on this model | def user_can_edit_snippet_type(user, model):
"""true if user has 'add', 'change' or 'delete' permission on this model"""
for action in ("add", "change", "delete"):
if user.has_perm(get_permission_name(action, model)):
return True
return False |
true if user has 'add', 'change' or 'delete' permission
on any model registered as a snippet type | def user_can_edit_snippets(user):
"""
true if user has 'add', 'change' or 'delete' permission
on any model registered as a snippet type
"""
snippet_models = get_snippet_models()
for model in snippet_models:
if user_can_edit_snippet_type(user, model):
return True
return False |
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type. | def get_snippet_model_from_url_params(app_name, model_name):
"""
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type.
"""
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return model |
Outputs a page's URL as relative (/foo/bar/) if it's within the same site as the
current page, or absolute (http://example.com/foo/bar/) if not.
If kwargs contains a fallback view name and page is None, the fallback view url will be returned. | def pageurl(context, page, fallback=None):
"""
Outputs a page's URL as relative (/foo/bar/) if it's within the same site as the
current page, or absolute (http://example.com/foo/bar/) if not.
If kwargs contains a fallback view name and page is None, the fallback view url will be returned.
"""
if page is None and fallback:
return resolve_url(fallback)
if not isinstance(page, Page):
raise ValueError("pageurl tag expected a Page object, got %r" % page)
return page.get_url(request=context.get("request")) |
Outputs a page's absolute URL (http://example.com/foo/bar/)
If kwargs contains a fallback view name and page is None, the fallback view url will be returned. | def fullpageurl(context, page, fallback=None):
"""
Outputs a page's absolute URL (http://example.com/foo/bar/)
If kwargs contains a fallback view name and page is None, the fallback view url will be returned.
"""
if page is None and fallback:
fallback_url = resolve_url(fallback)
if fallback_url and "request" in context and fallback_url[0] == "/":
fallback_url = context["request"].build_absolute_uri(fallback_url)
return fallback_url
if not isinstance(page, Page):
raise ValueError("fullpageurl tag expected a Page object, got %r" % page)
return page.get_full_url(request=context.get("request")) |
Returns the URL for the page that has the given slug.
First tries to find a page on the current site. If that fails or a request
is not available in the context, then returns the URL for the first page
that matches the slug on any site. | def slugurl(context, slug):
"""
Returns the URL for the page that has the given slug.
First tries to find a page on the current site. If that fails or a request
is not available in the context, then returns the URL for the first page
that matches the slug on any site.
"""
page = None
try:
site = Site.find_for_request(context["request"])
current_site = site
except KeyError:
# No site object found - allow the fallback below to take place.
pass
else:
if current_site is not None:
page = Page.objects.in_site(current_site).filter(slug=slug).first()
# If no page is found, fall back to searching the whole tree.
if page is None:
page = Page.objects.filter(slug=slug).first()
if page:
# call pageurl() instead of page.relative_url() here so we get the ``accepts_kwarg`` logic
return pageurl(context, page) |
Render the passed item of StreamField content, passing the current template context
if there's an identifiable way of doing so (i.e. if it has a `render_as_block` method). | def include_block(parser, token):
"""
Render the passed item of StreamField content, passing the current template context
if there's an identifiable way of doing so (i.e. if it has a `render_as_block` method).
"""
tokens = token.split_contents()
try:
tag_name = tokens.pop(0)
block_var_token = tokens.pop(0)
except IndexError:
raise template.TemplateSyntaxError(
"%r tag requires at least one argument" % tag_name
)
block_var = parser.compile_filter(block_var_token)
if tokens and tokens[0] == "with":
tokens.pop(0)
extra_context = token_kwargs(tokens, parser)
else:
extra_context = None
use_parent_context = True
if tokens and tokens[0] == "only":
tokens.pop(0)
use_parent_context = False
if tokens:
raise template.TemplateSyntaxError(
f"Unexpected argument to {tag_name!r} tag: {tokens[0]!r}"
)
return IncludeBlockNode(block_var, extra_context, use_parent_context) |
Returns the Site object for the given request | def wagtail_site(context):
"""
Returns the Site object for the given request
"""
try:
request = context["request"]
except KeyError:
return None
return Site.find_for_request(request=request) |
A helper function to define cache tags without duplicating `do_cache`. | def register_cache_tag(tag_name, node_class):
"""
A helper function to define cache tags without duplicating `do_cache`.
"""
@register.tag(tag_name)
def do_cache(parser, token):
# Implementation copied from `django.templatetags.cache.do_cache`
nodelist = parser.parse((f"end{tag_name}",))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError(
f"'{tokens[0]}' tag requires at least 2 arguments."
)
if len(tokens) > 3 and tokens[-1].startswith("using="):
cache_name = parser.compile_filter(tokens[-1][len("using=") :])
tokens = tokens[:-1]
else:
cache_name = None
return node_class(
nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
cache_name,
) |
Dummy sendfile backend implementation. | def sendfile(request, filename, **kwargs):
"""
Dummy sendfile backend implementation.
"""
return HttpResponse("Dummy backend response") |
Translates a nested dict structure into a flat form data dict
with hyphen-separated keys.
.. code-block:: python
nested_form_data({
'foo': 'bar',
'parent': {
'child': 'field',
},
})
# Returns: {'foo': 'bar', 'parent-child': 'field'} | def nested_form_data(data):
"""
Translates a nested dict structure into a flat form data dict
with hyphen-separated keys.
.. code-block:: python
nested_form_data({
'foo': 'bar',
'parent': {
'child': 'field',
},
})
# Returns: {'foo': 'bar', 'parent-child': 'field'}
"""
return {"-".join(key): value for key, value in _nested_form_data(data)} |
Takes a list of (block_type, value) tuples and turns it into
StreamField form data. Use this within a :func:`nested_form_data`
call, with the field name as the key.
.. code-block:: python
nested_form_data({'content': streamfield([
('text', 'Hello, world'),
])})
# Returns:
# {
# 'content-count': '1',
# 'content-0-type': 'text',
# 'content-0-value': 'Hello, world',
# 'content-0-order': '0',
# 'content-0-deleted': '',
# } | def streamfield(items):
"""
Takes a list of (block_type, value) tuples and turns it into
StreamField form data. Use this within a :func:`nested_form_data`
call, with the field name as the key.
.. code-block:: python
nested_form_data({'content': streamfield([
('text', 'Hello, world'),
])})
# Returns:
# {
# 'content-count': '1',
# 'content-0-type': 'text',
# 'content-0-value': 'Hello, world',
# 'content-0-order': '0',
# 'content-0-deleted': '',
# }
"""
def to_block(index, item):
block, value = item
return {"type": block, "value": value, "deleted": "", "order": str(index)}
data_dict = {str(index): to_block(index, item) for index, item in enumerate(items)}
data_dict["count"] = str(len(data_dict))
return data_dict |
Takes a list of form data for an InlineFormset and translates
it into valid POST data. Use this within a :func:`nested_form_data`
call, with the formset relation name as the key.
.. code-block:: python
nested_form_data({'lines': inline_formset([
{'text': 'Hello'},
{'text': 'World'},
])})
# Returns:
# {
# 'lines-TOTAL_FORMS': '2',
# 'lines-INITIAL_FORMS': '0',
# 'lines-MIN_NUM_FORMS': '0',
# 'lines-MAX_NUM_FORMS': '1000',
# 'lines-0-text': 'Hello',
# 'lines-0-ORDER': '0',
# 'lines-0-DELETE': '',
# 'lines-1-text': 'World',
# 'lines-1-ORDER': '1',
# 'lines-1-DELETE': '',
# } | def inline_formset(items, initial=0, min=0, max=1000):
"""
Takes a list of form data for an InlineFormset and translates
it into valid POST data. Use this within a :func:`nested_form_data`
call, with the formset relation name as the key.
.. code-block:: python
nested_form_data({'lines': inline_formset([
{'text': 'Hello'},
{'text': 'World'},
])})
# Returns:
# {
# 'lines-TOTAL_FORMS': '2',
# 'lines-INITIAL_FORMS': '0',
# 'lines-MIN_NUM_FORMS': '0',
# 'lines-MAX_NUM_FORMS': '1000',
# 'lines-0-text': 'Hello',
# 'lines-0-ORDER': '0',
# 'lines-0-DELETE': '',
# 'lines-1-text': 'World',
# 'lines-1-ORDER': '1',
# 'lines-1-DELETE': '',
# }
"""
def to_form(index, item):
defaults = {
"ORDER": str(index),
"DELETE": "",
}
defaults.update(item)
return defaults
data_dict = {str(index): to_form(index, item) for index, item in enumerate(items)}
data_dict.update(
{
"TOTAL_FORMS": str(len(data_dict)),
"INITIAL_FORMS": str(initial),
"MIN_NUM_FORMS": str(min),
"MAX_NUM_FORMS": str(max),
}
)
return data_dict |
Converts an HTML-like rich text string to the data format required by
the currently active rich text editor.
:param editor: An alternative editor name as defined in ``WAGTAILADMIN_RICH_TEXT_EDITORS``
:param features: A list of features allowed in the rich text content (see :ref:`rich_text_features`)
.. code-block:: python
self.assertCanCreate(root_page, ContentPage, nested_form_data({
'title': 'About us',
'body': rich_text('<p>Lorem ipsum dolor sit amet</p>'),
})) | def rich_text(value, editor="default", features=None):
"""
Converts an HTML-like rich text string to the data format required by
the currently active rich text editor.
:param editor: An alternative editor name as defined in ``WAGTAILADMIN_RICH_TEXT_EDITORS``
:param features: A list of features allowed in the rich text content (see :ref:`rich_text_features`)
.. code-block:: python
self.assertCanCreate(root_page, ContentPage, nested_form_data({
'title': 'About us',
'body': rich_text('<p>Lorem ipsum dolor sit amet</p>'),
}))
"""
widget = get_rich_text_editor_widget(editor, features)
return widget.format_value(value) |
Helper function to translate a possibly-timezone-aware datetime into the format used in the
go_live_at / expire_at form fields - "YYYY-MM-DD hh:mm", with no timezone indicator.
This will be interpreted as being in the server's timezone (settings.TIME_ZONE), so we
need to pass it through timezone.localtime to ensure that the client and server are in
agreement about what the timestamp means. | def submittable_timestamp(timestamp):
"""
Helper function to translate a possibly-timezone-aware datetime into the format used in the
go_live_at / expire_at form fields - "YYYY-MM-DD hh:mm", with no timezone indicator.
This will be interpreted as being in the server's timezone (settings.TIME_ZONE), so we
need to pass it through timezone.localtime to ensure that the client and server are in
agreement about what the timestamp means.
"""
if timezone.is_aware(timestamp):
return timezone.localtime(timestamp).strftime("%Y-%m-%d %H:%M")
else:
return timestamp.strftime("%Y-%m-%d %H:%M") |
Registers order against the model content_type, used to
control the order in which the models and their permissions appear
in the groups object permission editor | def register(model, **kwargs):
"""
Registers order against the model content_type, used to
control the order in which the models and their permissions appear
in the groups object permission editor
"""
order = kwargs.pop("order", None)
if order is not None:
content_type = ContentType.objects.get_for_model(resolve_model_string(model))
CONTENT_TYPE_ORDER[content_type.id] = order |
Strip model name from the end of the label, e.g. "Can deliver pizza" for a
Pizza model becomes "Can deliver". For permissions in the model's
Meta.default_permissions with default labels, also replace underscores
with spaces.
This is used to display custom model permissions in the admin.
See https://github.com/wagtail/wagtail/issues/10982. | def normalize_permission_label(permission: Permission):
"""
Strip model name from the end of the label, e.g. "Can deliver pizza" for a
Pizza model becomes "Can deliver". For permissions in the model's
Meta.default_permissions with default labels, also replace underscores
with spaces.
This is used to display custom model permissions in the admin.
See https://github.com/wagtail/wagtail/issues/10982.
"""
label = permission.name
content_type = permission.content_type
model = content_type.model_class()
verbose_name = default_verbose_name = content_type.name
if model:
default_verbose_name = camel_case_to_spaces(model._meta.object_name)
# If it's in default_permissions and the label matches Django's default
# label, remove the model name from the end of the label. Also replace
# underscores with spaces, as Django uses the action internal name as-is
# for the permission label, which means it tends to be in snake_case.
for action in model._meta.default_permissions:
default_codename = get_permission_codename(action, model._meta)
is_default = permission.codename == default_codename
if is_default and permission.name.startswith(f"Can {action}"):
return f"Can {action.replace('_', ' ')}"
# For all other cases (including custom permissions), try to remove the
# verbose name from the end of the label. This only works if the label
# matches the current verbose name or Django's default verbose name.
for name in (default_verbose_name, verbose_name):
if label.lower().endswith(name.lower()):
return label[: -len(name)].strip()
return label |
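Following the docstring's pizza example, a hedged illustration of the expected results; the Permission rows are hypothetical database objects.
from django.contrib.auth.models import Permission

# Default permission on a hypothetical Pizza model: the model name is stripped.
perm = Permission.objects.get(codename="add_pizza")      # name: "Can add pizza"
normalize_permission_label(perm)                          # -> "Can add"

# Custom permission whose label ends with the verbose name: model name stripped too.
perm = Permission.objects.get(codename="deliver_pizza")   # name: "Can deliver pizza"
normalize_permission_label(perm)                           # -> "Can deliver"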
Given a bound field with a queryset of Permission objects - which must be using
the CheckboxSelectMultiple widget - construct a list of dictionaries for 'objects':
'objects': [
{
'object': name_of_some_content_object,
'add': checkbox,
'change': checkbox,
'delete': checkbox,
'publish': checkbox, # only if the model extends DraftStateMixin
'custom': list_of_checkboxes_for_custom_permissions
},
]
and a list of other permissions:
'others': [
(any_non_add_change_delete_permission, checkbox),
]
(where 'checkbox' is an object with a tag() method that renders the checkbox as HTML;
this is a BoundWidget on Django >=1.11)
- and returns a table template formatted with this list. | def format_permissions(permission_bound_field):
"""
Given a bound field with a queryset of Permission objects - which must be using
the CheckboxSelectMultiple widget - construct a list of dictionaries for 'objects':
'objects': [
{
'object': name_of_some_content_object,
'add': checkbox,
'change': checkbox,
'delete': checkbox,
'publish': checkbox, # only if the model extends DraftStateMixin
'custom': list_of_checkboxes_for_custom_permissions
},
]
and a list of other permissions:
'others': [
(any_non_add_change_delete_permission, checkbox),
]
(where 'checkbox' is an object with a tag() method that renders the checkbox as HTML;
this is a BoundWidget on Django >=1.11)
- and returns a table template formatted with this list.
"""
permissions = permission_bound_field.field._queryset
# get a distinct and ordered list of the content types that these permissions relate to.
# relies on Permission model default ordering, dict.fromkeys() retaining that order
# from the queryset, and the stability of sorted().
content_type_ids = sorted(
dict.fromkeys(permissions.values_list("content_type_id", flat=True)),
key=lambda ct: CONTENT_TYPE_ORDER.get(ct, float("inf")),
)
# iterate over permission_bound_field to build a lookup of individual renderable
# checkbox objects
# checkbox.data['value'] gives a ModelChoiceIteratorValue
checkboxes_by_id = {
int(checkbox.data["value"].value): checkbox
for checkbox in permission_bound_field
}
# Permissions that are known by Wagtail, to be shown under their own columns.
# Other permissions will be shown under the "custom permissions" column.
main_permission_names = ["add", "change", "delete", "publish", "lock", "unlock"]
# Only show the columns for these permissions if any of the model has them.
extra_perms_exist = {
"publish": False,
"lock": False,
"unlock": False,
"custom": False,
}
# Batch the permission query for all content types, then group by content type
# (instead of querying permissions for each content type separately)
content_perms_by_ct_id = defaultdict(list)
permissions = permissions.filter(content_type_id__in=content_type_ids)
for permission in permissions:
content_perms_by_ct_id[permission.content_type_id].append(permission)
# Permissions that use Wagtail's Admin content type, to be displayed
# under the "Other permissions" section alongside the
# "Can access Wagtail admin" permission.
admin_content_type = ContentType.objects.get_for_model(Admin)
admin_permissions = content_perms_by_ct_id.pop(admin_content_type.id, [])
other_perms = [(perm, checkboxes_by_id[perm.id]) for perm in admin_permissions]
# We're done with the admin content type, so remove it from the list of content types
# but make sure the sorted order is preserved.
content_type_ids = [
ct_id for ct_id in content_type_ids if ct_id != admin_content_type.pk
]
# Permissions for all other content types, to be displayed under the
# "Object permissions" section.
object_perms = []
# Iterate using the sorted content_type_ids
for ct_id in content_type_ids:
content_perms = content_perms_by_ct_id[ct_id]
content_perms_dict = {}
custom_perms = []
for perm in content_perms:
content_perms_dict["object"] = perm.content_type.name
checkbox = checkboxes_by_id[perm.id]
attrs = {"data-action": "w-bulk#toggle", "data-w-bulk-target": "item"}
# identify the main categories of permission, and assign to
# the relevant dict key, else bung in the 'custom_perms' list
permission_action = perm.codename.split("_")[0]
is_known = (
permission_action in main_permission_names
and perm.codename == f"{permission_action}_{perm.content_type.model}"
)
if is_known:
if permission_action in extra_perms_exist:
extra_perms_exist[permission_action] = True
checkbox.data["attrs"].update(attrs)
checkbox.data["attrs"]["data-w-bulk-group-param"] = permission_action
content_perms_dict[permission_action] = {
"perm": perm,
"checkbox": checkbox,
}
else:
extra_perms_exist["custom"] = True
attrs["data-w-bulk-group-param"] = "custom"
perm_name = normalize_permission_label(perm)
custom_perms.append(
{
"attrs": attrs,
"perm": perm,
"name": perm_name,
"selected": checkbox.data["selected"],
}
)
content_perms_dict["custom"] = custom_perms
object_perms.append(content_perms_dict)
return {
"object_perms": object_perms,
"other_perms": other_perms,
"extra_perms_exist": extra_perms_exist,
} |
Generator function that iterates over the installed apps and
yields tuples of (app_name, module) | def get_app_modules():
"""
Generator function that iterates over the installed apps and
yields tuples of (app_name, module)
"""
for app in apps.get_app_configs():
yield app.name, app.module |
Searches each app module for the specified submodule and
yields tuples of (app_name, module) | def get_app_submodules(submodule_name):
"""
Searches each app module for the specified submodule and
yields tuples of (app_name, module)
"""
for name, module in get_app_modules():
if module_has_submodule(module, submodule_name):
yield name, import_module(f"{name}.{submodule_name}") |
Modify a view function so its response has the X-Frame-Options HTTP header
set to 'SAMEORIGIN'.
Adapted from Django's xframe_options_sameorigin so that it's always applied
even if the response already has that header set:
https://github.com/django/django/blob/3.2/django/views/decorators/clickjacking.py#L22-L37
Usage:
@xframe_options_sameorigin_override
def some_view(request):
... | def xframe_options_sameorigin_override(view_func):
"""
Modify a view function so its response has the X-Frame-Options HTTP header
set to 'SAMEORIGIN'.
Adapted from Django's xframe_options_sameorigin so that it's always applied
even if the response already has that header set:
https://github.com/django/django/blob/3.2/django/views/decorators/clickjacking.py#L22-L37
Usage:
@xframe_options_sameorigin_override
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
resp["X-Frame-Options"] = "SAMEORIGIN"
return resp
return functools.wraps(view_func)(wrapped_view) |
Compute the hash of a file-like object, without loading it all into memory. | def hash_filelike(filelike):
"""
Compute the hash of a file-like object, without loading it all into memory.
"""
file_pos = 0
if hasattr(filelike, "tell"):
file_pos = filelike.tell()
try:
# Reset file handler to the start of the file so we hash it all
filelike.seek(0)
except (AttributeError, UnsupportedOperation):
pass
if hasattr(hashlib, "file_digest"):
hasher = hashlib.file_digest(filelike, hashlib.sha1)
else:
hasher = hashlib.sha1()
while True:
data = filelike.read(HASH_READ_SIZE)
if not data:
break
hasher.update(data)
if hasattr(filelike, "seek"):
# Reset the file handler to where it was before
filelike.seek(file_pos)
return hasher.hexdigest() |
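A short usage sketch; this works with any file-like object, and the import path shown is an assumption.
import io

from wagtail.utils.file import hash_filelike  # import path assumed

digest = hash_filelike(io.BytesIO(b"hello world"))
# -> '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed' (SHA-1 hex digest)

with open("logo.png", "rb") as f:  # hypothetical file
    print(hash_filelike(f))        # file position is restored afterwards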
Return custom form class if defined and available | def get_custom_form(form_setting):
"""Return custom form class if defined and available"""
try:
return import_string(getattr(settings, form_setting))
except ImportError:
raise ImproperlyConfigured(
"%s refers to a form '%s' that is not available"
% (form_setting, getattr(settings, form_setting))
) |
create a response to send file using backend configured in SENDFILE_BACKEND
If attachment is True the content-disposition header will be set.
This will typically prompt the user to download the file, rather
than view it. The content-disposition filename depends on the
value of attachment_filename:
None (default): Same as filename
False: No content-disposition filename
String: Value used as filename
If no mimetype or encoding are specified, then they will be guessed via the
filename (using the standard python mimetypes module) | def sendfile(
request,
filename,
attachment=False,
attachment_filename=None,
mimetype=None,
encoding=None,
backend=None,
):
"""
create a response to send file using backend configured in SENDFILE_BACKEND
If attachment is True the content-disposition header will be set.
This will typically prompt the user to download the file, rather
than view it. The content-disposition filename depends on the
value of attachment_filename:
None (default): Same as filename
False: No content-disposition filename
String: Value used as filename
If no mimetype or encoding are specified, then they will be guessed via the
filename (using the standard python mimetypes module)
"""
_sendfile = backend or _get_sendfile()
if not os.path.exists(filename):
from django.http import Http404
raise Http404('"%s" does not exist' % filename)
guessed_mimetype, guessed_encoding = guess_type(filename)
if mimetype is None:
if guessed_mimetype:
mimetype = guessed_mimetype
else:
mimetype = "application/octet-stream"
response = _sendfile(request, filename, mimetype=mimetype)
if attachment:
parts = ["attachment"]
else:
parts = ["inline"]
if attachment_filename is None:
attachment_filename = os.path.basename(filename)
if attachment_filename:
from django.utils.encoding import force_str
from wagtail.coreutils import string_to_ascii
attachment_filename = force_str(attachment_filename)
ascii_filename = string_to_ascii(attachment_filename)
parts.append('filename="%s"' % ascii_filename)
if ascii_filename != attachment_filename:
from urllib.parse import quote
quoted_filename = quote(attachment_filename)
parts.append("filename*=UTF-8''%s" % quoted_filename)
response["Content-Disposition"] = "; ".join(parts)
response["Content-length"] = os.path.getsize(filename)
response["Content-Type"] = mimetype
response["Content-Encoding"] = encoding or guessed_encoding
return response |
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about. | def was_modified_since(header=None, mtime=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
"""
try:
if header is None:
raise ValueError
header_date = parsedate_tz(header)
if header_date is None:
raise ValueError
header_mtime = mktime_tz(header_date)
if mtime > header_mtime:
raise ValueError
except (ValueError, OverflowError):
return True
return False |
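A hedged sketch of how a Django view might use this when deciding between a 304 and a full response; the view, the request wiring, and the file path are assumptions.
import os

from django.http import FileResponse, HttpResponseNotModified


def serve_report(request):
    path = "/srv/files/report.pdf"  # hypothetical file
    mtime = os.path.getmtime(path)
    header = request.META.get("HTTP_IF_MODIFIED_SINCE")
    if not was_modified_since(header, mtime):
        return HttpResponseNotModified()  # the client's cached copy is still fresh
    return FileResponse(open(path, "rb"))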
Similar to how django-modelcluster stores the revision's data and similar to how
django stores dates in the database, this converts the date to UTC if required. | def ensure_utc(value):
"""
Similar to how django-modelcluster stores the revision's data and similar to how
django stores dates in the database, this converts the date to UTC if required.
"""
# https://github.com/wagtail/django-modelcluster/blob/8666f16eaf23ca98afc160b0a4729864411c0563/modelcluster/models.py#L21-L28
if settings.USE_TZ:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone).astimezone(
datetime.timezone.utc
)
else:
# convert to UTC
value = timezone.localtime(value, datetime.timezone.utc)
return value |
Uses Django's parse_datetime(), but ensures that an aware datetime is returned. | def parse_datetime_localized(date_string):
"""
Uses Django's parse_datetime(), but ensures that an aware datetime is returned.
"""
dt = parse_datetime(date_string)
if settings.USE_TZ and timezone.is_naive(dt):
dt = timezone.make_aware(dt, timezone=timezone.get_default_timezone())
return dt |
Helper function to format a possibly-timezone-aware datetime into the format
used by Django (e.g. in templates). | def render_timestamp(timestamp):
"""
Helper function to format a possibly-timezone-aware datetime into the format
used by Django (e.g. in templates).
"""
if timezone.is_aware(timestamp):
timestamp = timezone.localtime(timestamp)
return formats.date_format(timestamp, "DATETIME_FORMAT") |
Decorate all the views in the passed urlpatterns list with the given decorator | def decorate_urlpatterns(urlpatterns, decorator):
"""Decorate all the views in the passed urlpatterns list with the given decorator"""
for pattern in urlpatterns:
if hasattr(pattern, "url_patterns"):
# this is an included RegexURLResolver; recursively decorate the views
# contained in it
decorate_urlpatterns(pattern.url_patterns, decorator)
if getattr(pattern, "callback", None):
pattern.callback = update_wrapper(
decorator(pattern.callback), pattern.callback
)
return urlpatterns |
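A hedged usage sketch applying a standard Django decorator to an included URLconf; the module name is hypothetical.
from django.urls import include, path
from django.views.decorators.cache import never_cache

# Every view reachable under /admin/ gets never_cache applied, including nested includes.
urlpatterns = decorate_urlpatterns(
    [path("admin/", include("myproject.admin_urls"))],  # hypothetical URLconf module
    never_cache,
)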
Update a nested dictionary or similar mapping.
Modify ``source`` in place. | def deep_update(source, overrides):
"""Update a nested dictionary or similar mapping.
Modify ``source`` in place.
"""
for key, value in overrides.items():
if isinstance(value, Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source |
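A brief usage sketch; the import path is assumed.
from wagtail.utils.utils import deep_update  # import path assumed

config = {"search": {"backend": "database", "options": {"atomic": True}}}
deep_update(config, {"search": {"options": {"timeout": 5}}})
# config is modified in place:
# {"search": {"backend": "database", "options": {"atomic": True, "timeout": 5}}}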
Return a PEP 440-compliant version number from VERSION. | def get_version(version):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "rc", "dev": ".dev"}
sub = mapping[version[3]] + str(version[4])
return main + sub |
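Some illustrative calls, given the 5-tuple VERSION convention enforced by the companion helpers; the version numbers themselves are made up for the example.
get_version((6, 1, 0, "final", 0))  # -> "6.1"      (a patch level of 0 is dropped)
get_version((6, 1, 2, "final", 0))  # -> "6.1.2"
get_version((6, 2, 0, "alpha", 1))  # -> "6.2a1"
get_version((6, 2, 0, "rc", 2))     # -> "6.2rc2"
get_version((6, 2, 0, "dev", 0))    # -> "6.2.dev0"  (pre-alpha development release)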
Return main version (X.Y[.Z]) from VERSION. | def get_main_version(version=None, include_patch=True):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
if include_patch:
parts = 2 if version[2] == 0 else 3
else:
parts = 2
return ".".join(str(x) for x in version[:parts]) |
Return a tuple of the Wagtail version. If version argument is non-empty,
check for correctness of the tuple provided. | def get_complete_version(version=None):
"""
Return a tuple of the Wagtail version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from wagtail import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("dev", "alpha", "beta", "rc", "final")
return version |
Returns the semver version (X.Y.Z[-(alpha|beta)]) from VERSION | def get_semver_version(version):
"Returns the semver version (X.Y.Z[-(alpha|beta)]) from VERSION"
main = ".".join(str(x) for x in version[:3])
sub = ""
if version[3] != "final":
sub = "-{}.{}".format(*version[3:])
return main + sub |
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype. | def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
# This launches a subprocess to decode audio while down-mixing
# and resampling as necessary. Requires the ffmpeg CLI in PATH.
# fmt: off
cmd = [
"ffmpeg",
"-nostdin",
"-threads", "0",
"-i", file,
"-f", "s16le",
"-ac", "1",
"-acodec", "pcm_s16le",
"-ar", str(sr),
"-"
]
# fmt: on
try:
out = run(cmd, capture_output=True, check=True).stdout
except CalledProcessError as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 |
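A minimal usage sketch, assuming the ffmpeg CLI is on PATH; "speech.wav" is a placeholder path:
waveform = load_audio("speech.wav")    # mono float32 waveform resampled to 16 kHz
print(waveform.dtype, waveform.shape)  # float32 (n_samples,)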
Pad or trim the audio array to N_SAMPLES, as expected by the encoder. | def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(
dim=axis, index=torch.arange(length, device=array.device)
)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array |
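A small sketch showing that both short and long inputs come out at exactly N_SAMPLES (30 seconds at 16 kHz):
import numpy as np

short = np.zeros(5 * SAMPLE_RATE, dtype=np.float32)   # 5 seconds of silence
long = np.zeros(45 * SAMPLE_RATE, dtype=np.float32)   # 45 seconds of silence
assert pad_or_trim(short).shape[-1] == N_SAMPLES      # zero-padded up to 30 s
assert pad_or_trim(long).shape[-1] == N_SAMPLES       # trimmed down to 30 s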
Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
This allows decoupling the librosa dependency; the filters were saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
mel_128=librosa.filters.mel(sr=16000, n_fft=400, n_mels=128),
) | def mel_filters(device, n_mels: int) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; the filters were saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
mel_128=librosa.filters.mel(sr=16000, n_fft=400, n_mels=128),
)
"""
assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}"
filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
with np.load(filters_path, allow_pickle=False) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device) |
Compute the log-Mel spectrogram of the given audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
    The number of Mel-frequency filters; only 80 and 128 are supported
padding: int
Number of zero samples to pad to the right
device: Optional[Union[str, torch.device]]
If given, the audio tensor is moved to this device before STFT
Returns
-------
    torch.Tensor, shape = (n_mels, n_frames)
A Tensor that contains the Mel spectrogram | def log_mel_spectrogram(
audio: Union[str, np.ndarray, torch.Tensor],
n_mels: int = 80,
padding: int = 0,
device: Optional[Union[str, torch.device]] = None,
):
"""
    Compute the log-Mel spectrogram of the given audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
        The number of Mel-frequency filters; only 80 and 128 are supported
padding: int
Number of zero samples to pad to the right
device: Optional[Union[str, torch.device]]
If given, the audio tensor is moved to this device before STFT
Returns
-------
        torch.Tensor, shape = (n_mels, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
if device is not None:
audio = audio.to(device)
if padding > 0:
audio = F.pad(audio, (0, padding))
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec |
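A usage sketch chaining the audio helpers above; "speech.wav" is a placeholder path:
audio = load_audio("speech.wav")
mel = log_mel_spectrogram(audio, n_mels=80)
print(mel.shape)  # torch.Size([80, n_frames]), normalized log-Mel values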
Detect the spoken language in the audio, and return the ids of the most probable language
tokens along with the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (n_audio,)
    ids of the most probable language tokens, which appear after the startoftranscript token.
language_probs : List[Dict[str, float]], length = n_audio
list of dictionaries containing the probability distribution over all languages. | def detect_language(
model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None
) -> Tuple[Tensor, List[dict]]:
"""
    Detect the spoken language in the audio, and return the ids of the most probable language
    tokens along with the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (n_audio,)
        ids of the most probable language tokens, which appear after the startoftranscript token.
language_probs : List[Dict[str, float]], length = n_audio
list of dictionaries containing the probability distribution over all languages.
"""
if tokenizer is None:
tokenizer = get_tokenizer(
model.is_multilingual, num_languages=model.num_languages
)
if (
tokenizer.language is None
or tokenizer.language_token not in tokenizer.sot_sequence
):
raise ValueError(
"This model doesn't have language tokens so it can't perform lang id"
)
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
# skip encoder forward pass if already-encoded audio features were given
if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
mel = model.encoder(mel)
# forward pass using a single token, startoftranscript
n_audio = mel.shape[0]
x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device) # [n_audio, 1]
logits = model.logits(x, mel)[:, 0]
# collect detected languages; suppress all non-language tokens
mask = torch.ones(logits.shape[-1], dtype=torch.bool)
mask[list(tokenizer.all_language_tokens)] = False
logits[:, mask] = -np.inf
language_tokens = logits.argmax(dim=-1)
language_token_probs = logits.softmax(dim=-1).cpu()
language_probs = [
{
c: language_token_probs[i, j].item()
for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
}
for i in range(n_audio)
]
if single:
language_tokens = language_tokens[0]
language_probs = language_probs[0]
return language_tokens, language_probs |
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
options: DecodingOptions
A dataclass that contains all necessary options for decoding 30-second segments
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s) | def decode(
model: "Whisper",
mel: Tensor,
options: DecodingOptions = DecodingOptions(),
**kwargs,
) -> Union[DecodingResult, List[DecodingResult]]:
"""
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
options: DecodingOptions
A dataclass that contains all necessary options for decoding 30-second segments
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
"""
if single := mel.ndim == 2:
mel = mel.unsqueeze(0)
if kwargs:
options = replace(options, **kwargs)
result = DecodingTask(model, options).run(mel)
return result[0] if single else result |
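A hedged end-to-end sketch of the low-level API above, assuming the openai-whisper package is installed (weights download on first use) and "speech.wav" is a placeholder path:
import whisper

model = whisper.load_model("base")
audio = whisper.pad_or_trim(whisper.load_audio("speech.wav"))
mel = whisper.log_mel_spectrogram(audio, n_mels=model.dims.n_mels).to(model.device)
_, probs = model.detect_language(mel)
print("detected language:", max(probs, key=probs.get))
result = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
print(result.text)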
Returns sinusoids for positional embedding | def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) |
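A quick shape check for the positional-embedding helper above:
pe = sinusoids(length=1500, channels=384)
print(pe.shape)  # torch.Size([1500, 384]); first half of each row is sines, second half cosines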
Apply a median filter of width `filter_width` along the last dimension of `x` | def median_filter(x: torch.Tensor, filter_width: int):
"""Apply a median filter of width `filter_width` along the last dimension of `x`"""
pad_width = filter_width // 2
if x.shape[-1] <= pad_width:
# F.pad requires the padding width to be smaller than the input dimension
return x
if (ndim := x.ndim) <= 2:
# `F.pad` does not support 1D or 2D inputs for reflect padding but supports 3D and 4D
x = x[None, None, :]
assert (
filter_width > 0 and filter_width % 2 == 1
), "`filter_width` should be an odd number"
result = None
x = F.pad(x, (filter_width // 2, filter_width // 2, 0, 0), mode="reflect")
if x.is_cuda:
try:
from .triton_ops import median_filter_cuda
result = median_filter_cuda(x, filter_width)
except (RuntimeError, subprocess.CalledProcessError):
warnings.warn(
"Failed to launch Triton kernels, likely due to missing CUDA toolkit; "
"falling back to a slower median kernel implementation..."
)
if result is None:
# sort() is faster than torch.median (https://github.com/pytorch/pytorch/issues/51450)
result = x.unfold(-1, filter_width, 1).sort()[0][..., filter_width // 2]
if ndim <= 2:
result = result[0, 0]
return result |
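A small CPU worked example; the expected output was computed by hand from the reflect-padded windows:
x = torch.tensor([1.0, 5.0, 2.0, 8.0, 3.0])
print(median_filter(x, filter_width=3))  # tensor([5., 2., 5., 3., 8.])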
Transcribe an audio file using Whisper
Parameters
----------
model: Whisper
The Whisper model instance
audio: Union[str, np.ndarray, torch.Tensor]
The path to the audio file to open, or the audio waveform
verbose: bool
    Whether to display the text being decoded to the console. If True, displays all the details;
    if False, displays minimal details; if None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
    to make it more likely to predict those words correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None. | def transcribe(
model: "Whisper",
audio: Union[str, np.ndarray, torch.Tensor],
*,
verbose: Optional[bool] = None,
temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
compression_ratio_threshold: Optional[float] = 2.4,
logprob_threshold: Optional[float] = -1.0,
no_speech_threshold: Optional[float] = 0.6,
condition_on_previous_text: bool = True,
initial_prompt: Optional[str] = None,
word_timestamps: bool = False,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
clip_timestamps: Union[str, List[float]] = "0",
hallucination_silence_threshold: Optional[float] = None,
**decode_options,
):
"""
Transcribe an audio file using Whisper
Parameters
----------
model: Whisper
The Whisper model instance
audio: Union[str, np.ndarray, torch.Tensor]
The path to the audio file to open, or the audio waveform
verbose: bool
        Whether to display the text being decoded to the console. If True, displays all the details;
        if False, displays minimal details; if None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
        to make it more likely to predict those words correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = torch.float16 if decode_options.get("fp16", True) else torch.float32
if model.device == torch.device("cpu"):
if torch.cuda.is_available():
warnings.warn("Performing inference on CPU when CUDA is available")
if dtype == torch.float16:
warnings.warn("FP16 is not supported on CPU; using FP32 instead")
dtype = torch.float32
if dtype == torch.float32:
decode_options["fp16"] = False
    # Pad 30 seconds of silence to the input audio, for slicing
mel = log_mel_spectrogram(audio, model.dims.n_mels, padding=N_SAMPLES)
content_frames = mel.shape[-1] - N_FRAMES
content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE)
if decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language"
)
mel_segment = pad_or_trim(mel, N_FRAMES).to(model.device).to(dtype)
_, probs = model.detect_language(mel_segment)
decode_options["language"] = max(probs, key=probs.get)
if verbose is not None:
print(
f"Detected language: {LANGUAGES[decode_options['language']].title()}"
)
language: str = decode_options["language"]
task: str = decode_options.get("task", "transcribe")
tokenizer = get_tokenizer(
model.is_multilingual,
num_languages=model.num_languages,
language=language,
task=task,
)
if isinstance(clip_timestamps, str):
clip_timestamps = [
float(ts) for ts in (clip_timestamps.split(",") if clip_timestamps else [])
]
seek_points: List[int] = [round(ts * FRAMES_PER_SECOND) for ts in clip_timestamps]
if len(seek_points) == 0:
seek_points.append(0)
if len(seek_points) % 2 == 1:
seek_points.append(content_frames)
seek_clips: List[Tuple[int, int]] = list(zip(seek_points[::2], seek_points[1::2]))
punctuation = "\"'“¿([{-\"'.。,,!!??::”)]}、"
if word_timestamps and task == "translate":
warnings.warn("Word-level timestamps on translations may not be reliable.")
def decode_with_fallback(segment: torch.Tensor) -> DecodingResult:
temperatures = (
[temperature] if isinstance(temperature, (int, float)) else temperature
)
decode_result = None
for t in temperatures:
kwargs = {**decode_options}
if t > 0:
# disable beam_size and patience when t > 0
kwargs.pop("beam_size", None)
kwargs.pop("patience", None)
else:
# disable best_of when t == 0
kwargs.pop("best_of", None)
options = DecodingOptions(**kwargs, temperature=t)
decode_result = model.decode(segment, options)
needs_fallback = False
if (
compression_ratio_threshold is not None
and decode_result.compression_ratio > compression_ratio_threshold
):
needs_fallback = True # too repetitive
if (
logprob_threshold is not None
and decode_result.avg_logprob < logprob_threshold
):
needs_fallback = True # average log probability is too low
if (
no_speech_threshold is not None
and decode_result.no_speech_prob > no_speech_threshold
):
needs_fallback = False # silence
if not needs_fallback:
break
return decode_result
clip_idx = 0
seek = seek_clips[clip_idx][0]
input_stride = exact_div(
N_FRAMES, model.dims.n_audio_ctx
) # mel frames per output token: 2
time_precision = (
input_stride * HOP_LENGTH / SAMPLE_RATE
) # time per output token: 0.02 (seconds)
all_tokens = []
all_segments = []
prompt_reset_since = 0
if initial_prompt is not None:
initial_prompt_tokens = tokenizer.encode(" " + initial_prompt.strip())
all_tokens.extend(initial_prompt_tokens)
else:
initial_prompt_tokens = []
def new_segment(
*, start: float, end: float, tokens: torch.Tensor, result: DecodingResult
):
tokens = tokens.tolist()
text_tokens = [token for token in tokens if token < tokenizer.eot]
return {
"seek": seek,
"start": start,
"end": end,
"text": tokenizer.decode(text_tokens),
"tokens": tokens,
"temperature": result.temperature,
"avg_logprob": result.avg_logprob,
"compression_ratio": result.compression_ratio,
"no_speech_prob": result.no_speech_prob,
}
# show the progress bar when verbose is False (if True, transcribed text will be printed)
with tqdm.tqdm(
total=content_frames, unit="frames", disable=verbose is not False
) as pbar:
last_speech_timestamp = 0.0
# NOTE: This loop is obscurely flattened to make the diff readable.
# A later commit should turn this into a simpler nested loop.
# for seek_clip_start, seek_clip_end in seek_clips:
# while seek < seek_clip_end
while clip_idx < len(seek_clips):
seek_clip_start, seek_clip_end = seek_clips[clip_idx]
if seek < seek_clip_start:
seek = seek_clip_start
if seek >= seek_clip_end:
clip_idx += 1
if clip_idx < len(seek_clips):
seek = seek_clips[clip_idx][0]
continue
time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
window_end_time = float((seek + N_FRAMES) * HOP_LENGTH / SAMPLE_RATE)
segment_size = min(N_FRAMES, content_frames - seek, seek_clip_end - seek)
mel_segment = mel[:, seek : seek + segment_size]
segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE
mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype)
decode_options["prompt"] = all_tokens[prompt_reset_since:]
result: DecodingResult = decode_with_fallback(mel_segment)
tokens = torch.tensor(result.tokens)
if no_speech_threshold is not None:
# no voice activity check
should_skip = result.no_speech_prob > no_speech_threshold
if (
logprob_threshold is not None
and result.avg_logprob > logprob_threshold
):
# don't skip if the logprob is high enough, despite the no_speech_prob
should_skip = False
if should_skip:
seek += segment_size # fast-forward to the next segment boundary
continue
previous_seek = seek
current_segments = []
# anomalous words are very long/short/improbable
def word_anomaly_score(word: dict) -> float:
probability = word.get("probability", 0.0)
duration = word["end"] - word["start"]
score = 0.0
if probability < 0.15:
score += 1.0
if duration < 0.133:
score += (0.133 - duration) * 15
if duration > 2.0:
score += duration - 2.0
return score
def is_segment_anomaly(segment: Optional[dict]) -> bool:
if segment is None or not segment["words"]:
return False
words = [w for w in segment["words"] if w["word"] not in punctuation]
words = words[:8]
score = sum(word_anomaly_score(w) for w in words)
return score >= 3 or score + 0.01 >= len(words)
def next_words_segment(segments: List[dict]) -> Optional[dict]:
return next((s for s in segments if s["words"]), None)
timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin)
single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True]
consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0]
consecutive.add_(1)
if len(consecutive) > 0:
# if the output contains two consecutive timestamp tokens
slices = consecutive.tolist()
if single_timestamp_ending:
slices.append(len(tokens))
last_slice = 0
for current_slice in slices:
sliced_tokens = tokens[last_slice:current_slice]
start_timestamp_pos = (
sliced_tokens[0].item() - tokenizer.timestamp_begin
)
end_timestamp_pos = (
sliced_tokens[-1].item() - tokenizer.timestamp_begin
)
current_segments.append(
new_segment(
start=time_offset + start_timestamp_pos * time_precision,
end=time_offset + end_timestamp_pos * time_precision,
tokens=sliced_tokens,
result=result,
)
)
last_slice = current_slice
if single_timestamp_ending:
# single timestamp at the end means no speech after the last timestamp.
seek += segment_size
else:
# otherwise, ignore the unfinished segment and seek to the last timestamp
last_timestamp_pos = (
tokens[last_slice - 1].item() - tokenizer.timestamp_begin
)
seek += last_timestamp_pos * input_stride
else:
duration = segment_duration
timestamps = tokens[timestamp_tokens.nonzero().flatten()]
if (
len(timestamps) > 0
and timestamps[-1].item() != tokenizer.timestamp_begin
):
# no consecutive timestamps but it has a timestamp; use the last one.
last_timestamp_pos = (
timestamps[-1].item() - tokenizer.timestamp_begin
)
duration = last_timestamp_pos * time_precision
current_segments.append(
new_segment(
start=time_offset,
end=time_offset + duration,
tokens=tokens,
result=result,
)
)
seek += segment_size
if word_timestamps:
add_word_timestamps(
segments=current_segments,
model=model,
tokenizer=tokenizer,
mel=mel_segment,
num_frames=segment_size,
prepend_punctuations=prepend_punctuations,
append_punctuations=append_punctuations,
last_speech_timestamp=last_speech_timestamp,
)
if not single_timestamp_ending:
last_word_end = get_end(current_segments)
if last_word_end is not None and last_word_end > time_offset:
seek = round(last_word_end * FRAMES_PER_SECOND)
# skip silence before possible hallucinations
if hallucination_silence_threshold is not None:
threshold = hallucination_silence_threshold
if not single_timestamp_ending:
last_word_end = get_end(current_segments)
if last_word_end is not None and last_word_end > time_offset:
remaining_duration = window_end_time - last_word_end
if remaining_duration > threshold:
seek = round(last_word_end * FRAMES_PER_SECOND)
else:
seek = previous_seek + segment_size
# if first segment might be a hallucination, skip leading silence
first_segment = next_words_segment(current_segments)
if first_segment is not None and is_segment_anomaly(first_segment):
gap = first_segment["start"] - time_offset
if gap > threshold:
seek = previous_seek + round(gap * FRAMES_PER_SECOND)
continue
# skip silence before any possible hallucination that is surrounded
# by silence or more hallucinations
hal_last_end = last_speech_timestamp
for si in range(len(current_segments)):
segment = current_segments[si]
if not segment["words"]:
continue
if is_segment_anomaly(segment):
next_segment = next_words_segment(
current_segments[si + 1 :]
)
if next_segment is not None:
hal_next_start = next_segment["words"][0]["start"]
else:
hal_next_start = time_offset + segment_duration
silence_before = (
segment["start"] - hal_last_end > threshold
or segment["start"] < threshold
or segment["start"] - time_offset < 2.0
)
silence_after = (
hal_next_start - segment["end"] > threshold
or is_segment_anomaly(next_segment)
or window_end_time - segment["end"] < 2.0
)
if silence_before and silence_after:
seek = round(
max(time_offset + 1, segment["start"])
* FRAMES_PER_SECOND
)
if content_duration - segment["end"] < threshold:
seek = content_frames
current_segments[si:] = []
break
hal_last_end = segment["end"]
last_word_end = get_end(current_segments)
if last_word_end is not None:
last_speech_timestamp = last_word_end
if verbose:
for segment in current_segments:
start, end, text = segment["start"], segment["end"], segment["text"]
line = f"[{format_timestamp(start)} --> {format_timestamp(end)}] {text}"
print(make_safe(line))
# if a segment is instantaneous or does not contain text, clear it
for i, segment in enumerate(current_segments):
if segment["start"] == segment["end"] or segment["text"].strip() == "":
segment["text"] = ""
segment["tokens"] = []
segment["words"] = []
all_segments.extend(
[
{"id": i, **segment}
for i, segment in enumerate(
current_segments, start=len(all_segments)
)
]
)
all_tokens.extend(
[token for segment in current_segments for token in segment["tokens"]]
)
if not condition_on_previous_text or result.temperature > 0.5:
# do not feed the prompt tokens if a high temperature was used
prompt_reset_since = len(all_tokens)
# update progress bar
pbar.update(min(content_frames, seek) - previous_seek)
return dict(
text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]),
segments=all_segments,
language=language,
) |
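A hedged usage sketch of the high-level API, again assuming the openai-whisper package is installed and "speech.wav" is a placeholder path:
import whisper

model = whisper.load_model("base")
result = model.transcribe("speech.wav", verbose=False, word_timestamps=True)
print(result["language"])
for segment in result["segments"]:
    print(f"[{segment['start']:.2f} --> {segment['end']:.2f}] {segment['text'].strip()}")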
Apply a median filter of given width along the last dimension of x | def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y |
Returns the names of available models | def available_models() -> List[str]:
"""Returns the names of available models"""
return list(_MODELS.keys()) |
Load a Whisper ASR model
Parameters
----------
name : str
one of the official model names listed by `whisper.available_models()`, or
path to a model checkpoint containing the model dimensions and the model state_dict.
device : Union[str, torch.device]
the PyTorch device to put the model into
download_root: str
path to download the model files; by default, it uses "~/.cache/whisper"
in_memory: bool
whether to preload the model weights into host memory
Returns
-------
model : Whisper
The Whisper ASR model instance | def load_model(
name: str,
device: Optional[Union[str, torch.device]] = None,
download_root: str = None,
in_memory: bool = False,
) -> Whisper:
"""
Load a Whisper ASR model
Parameters
----------
name : str
one of the official model names listed by `whisper.available_models()`, or
path to a model checkpoint containing the model dimensions and the model state_dict.
device : Union[str, torch.device]
the PyTorch device to put the model into
download_root: str
path to download the model files; by default, it uses "~/.cache/whisper"
in_memory: bool
whether to preload the model weights into host memory
Returns
-------
model : Whisper
The Whisper ASR model instance
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if download_root is None:
default = os.path.join(os.path.expanduser("~"), ".cache")
download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
if name in _MODELS:
checkpoint_file = _download(_MODELS[name], download_root, in_memory)
alignment_heads = _ALIGNMENT_HEADS[name]
elif os.path.isfile(name):
checkpoint_file = open(name, "rb").read() if in_memory else name
alignment_heads = None
else:
raise RuntimeError(
f"Model {name} not found; available models = {available_models()}"
)
with (
io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb")
) as fp:
checkpoint = torch.load(fp, map_location=device)
del checkpoint_file
dims = ModelDimensions(**checkpoint["dims"])
model = Whisper(dims)
model.load_state_dict(checkpoint["model_state_dict"])
if alignment_heads is not None:
model.set_alignment_heads(alignment_heads)
return model.to(device) |
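A small sketch showing explicit device and cache-directory selection; both values are placeholders:
model = load_model("tiny", device="cpu", download_root="/tmp/whisper-cache")
print(model.is_multilingual, model.dims.n_mels)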
Replace any other markers, symbols, and punctuation with a space,
and drop any diacritics (category 'Mn' and some manual mappings) | def remove_symbols_and_diacritics(s: str, keep=""):
"""
    Replace any other markers, symbols, and punctuation with a space,
and drop any diacritics (category 'Mn' and some manual mappings)
"""
return "".join(
c
if c in keep
else ADDITIONAL_DIACRITICS[c]
if c in ADDITIONAL_DIACRITICS
else ""
if unicodedata.category(c) == "Mn"
else " "
if unicodedata.category(c)[0] in "MSP"
else c
for c in unicodedata.normalize("NFKD", s)
) |
Replace any other markers, symbols, and punctuation with a space, keeping diacritics
"""
    Replace any other markers, symbols, and punctuation with a space, keeping diacritics
"""
return "".join(
" " if unicodedata.category(c)[0] in "MSP" else c
for c in unicodedata.normalize("NFKC", s)
) |
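A quick comparison of the two normalizers above on the same string (this relies on the module-level ADDITIONAL_DIACRITICS table; outputs shown for the default keep=""):
print(remove_symbols_and_diacritics("Héllo, wörld!"))  # "Hello  world "
print(remove_symbols("Héllo, wörld!"))                 # "Héllo  wörld "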
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window. | def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
# CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py
def top_k_logits(logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float("Inf")
return out
for _ in range(steps):
x_cond = (
x if x.size(1) <= block_size else x[:, -block_size:]
) # crop context if needed
logits = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x[0] |
Allocate a cache to be used with the Transformer module.
Args:
args (ModelArgs): the model configuration.
length (int): per layer cache size.
It is usually budgeted as ``max_batch * max_seq``
device (torch.device, optional): the device on which
the cache should be allocated.
n_layers (int, optional): the number of layers to
allocate a cache for (defaults to the model
settings).
dtype (torch.dtype, optional): the dtype to use for
cache entries (defaults to the default dtype).
Returns:
    The cache object to pass to ``Transformer.forward``. | def make_cache(
args: ModelArgs,
length: int,
device: Optional[Union[str, torch.device]] = None,
n_layers: Optional[int] = None,
dtype: Optional[torch.dtype] = None,
) -> list[LayerCache]:
"""
Allocate a cache to be used with the Transformer module.
Args:
args (ModelArgs): the model configuration.
length (int): per layer cache size.
It is usually budgeted as ``max_batch * max_seq``
device (torch.device, optional): the device on which
the cache should be allocated.
n_layers (int, optional): the number of layers to
allocate a cache for (defaults to the model
settings).
dtype (torch.dtype, optional): the dtype to use for
cache entries (defaults to the default dtype).
Returns:
        The cache object to pass to ``Transformer.forward``.
"""
head_dim = args.dim // args.n_heads
n_kv_heads = args.n_kv_heads
if n_kv_heads is None:
n_kv_heads = args.n_heads
n_local_kv_heads = n_kv_heads // mp_utils.get_world_size()
if n_layers is None:
n_layers = args.n_layers
shape = (1, length, n_local_kv_heads, 1, head_dim)
heads_per_group = args.n_heads // n_kv_heads
expansion = (-1, -1, -1, heads_per_group, -1)
return [
(
torch.zeros(shape, device=device, dtype=dtype).expand(expansion),
torch.zeros(shape, device=device, dtype=dtype).expand(expansion),
)
for _ in range(n_layers)
] |
Take a prefix view of a larger cache.
The original cache object remains of identical size and valid
after the shrunken alias has been used. This function is useful
when a cache was allocated for a larger batch size than what is
necessary.
Args:
cache: the cache to take a view in.
length (int): the desired length
Returns:
A view in the input cache object. | def cache_prefix(cache: list[LayerCache], length: int) -> list[LayerCache]:
"""
Take a prefix view of a larger cache.
The original cache object remains of identical size and valid
    after the shrunken alias has been used. This function is useful
when a cache was allocated for a larger batch size than what is
necessary.
Args:
cache: the cache to take a view in.
length (int): the desired length
Returns:
A view in the input cache object.
"""
if len(cache) > 0:
assert cache[0][0].shape[1] >= length
return [(ck[:, :length], cv[:, :length]) for ck, cv in cache] |
Initialize model parallelism support.
Args:
world_size (int): the number of processes running on
the current node available for model parallelism.
local_rank (int): the present process' rank.
group (torch.distributed.ProcessGroup, optional): the
process group to use for model parallel communications.
use_gpu (bool, optional): whether computations are
happening on a GPU or not (defaults to True).
seed (int, optional): the seed used to seed the prng
on all model parallel processes
Returns:
The pytorch device to use in the present process.
Note:
If ``group`` is not specified, the default process group is
used for model parallelism. This means that the present
module may be incompatible with other forms of parallelism
such as data parallelism. | def initialize(
world_size: int,
local_rank: int,
group: Optional[ProcessGroup] = None,
use_gpu: bool = True,
seed: int = 80486,
) -> str:
"""
Initialize model parallelism support.
Args:
world_size (int): the number of processes running on
the current node available for model parallelism.
local_rank (int): the present process' rank.
group (torch.distributed.ProcessGroup, optional): the
process group to use for model parallel communications.
use_gpu (bool, optional): whether computations are
happening on a GPU or not (defaults to True).
seed (int, optional): the seed used to seed the prng
on all model parallel processes
    Returns:
The pytorch device to use in the present process.
Note:
If ``group`` is not specified, the default process group is
used for model parallelism. This means that the present
module may be incompatible with other forms of parallelism
such as data parallelism.
"""
global _GROUP
global _WORLD_SIZE
global _LOCAL_RANK
assert local_rank < world_size
if use_gpu:
device = f"cuda:{local_rank}"
torch.cuda.set_device(local_rank)
else:
device = "cpu"
if group is None:
if "MASTER_ADDR" not in os.environ:
assert world_size == 1
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "1234"
torch.distributed.init_process_group(
backend="nccl" if use_gpu else "gloo",
init_method="env://",
world_size=world_size,
rank=local_rank,
)
_GROUP = group
_WORLD_SIZE = world_size
_LOCAL_RANK = local_rank
torch.manual_seed(seed)
return device |
Gather a tensor of shape (n, m) into a tensor of shape (n, mp_size * m). | def all_gather(x: torch.Tensor) -> torch.Tensor:
"""
Gather a tensor of shape (n, m) into a tensor of shape (n, mp_size * m).
"""
mp_size = get_world_size()
if mp_size == 1:
return x
gather = [torch.empty_like(x) for _ in range(mp_size)]
torch.distributed.all_gather(gather, x, group=_GROUP)
return torch.cat(gather, dim=-1) |
Perform top-p (nucleus) sampling on a probability distribution.
Args:
probs (torch.Tensor): probability distribution tensor.
p (float): probability threshold for top-p sampling.
Returns:
torch.Tensor: sampled token indices.
Note:
Top-p sampling selects the smallest set of tokens whose cumulative
probability mass exceeds the threshold p. The distribution is
renormalized based on the selected tokens. | def top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
"""
Perform top-p (nucleus) sampling on a probability distribution.
Args:
probs (torch.Tensor): probability distribution tensor.
p (float): probability threshold for top-p sampling.
Returns:
torch.Tensor: sampled token indices.
Note:
Top-p sampling selects the smallest set of tokens whose cumulative
probability mass exceeds the threshold p. The distribution is
renormalized based on the selected tokens.
"""
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
next_token = torch.multinomial(probs_sort, num_samples=1)
next_token = torch.gather(probs_idx, -1, next_token)
return next_token |
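A small sketch of the nucleus cutoff: with p=0.75 only the two most probable tokens survive renormalization, so the sample is always token 1 or 2:
probs = torch.tensor([[0.10, 0.60, 0.20, 0.05, 0.05]])
next_token = top_p(probs, p=0.75)
print(next_token)  # tensor([[1]]) or tensor([[2]])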
Return the version from an exact-match git tag (with the leading "v" stripped), or None if HEAD is not tagged. | def get_tagged_version() -> Optional[str]:
"""
    Return the version from an exact-match git tag (with the leading "v" stripped), or None if HEAD is not tagged.
"""
try:
tag = subprocess.check_output(
["git", "describe", "--tags", "--exact-match", "HEAD"],
text=True,
stderr=subprocess.DEVNULL,
).strip()
except subprocess.CalledProcessError: # no tag
return None
if not tag.startswith("v"):
return None
return tag[1:] |
Make sure that the causal flag is respected.
The input data is orthogonal by design if causal is respected, but if the attention looks ahead this will fail | def test_causal(
attention_name: str,
heads: int,
):
"""
Make sure that the causal flag is respected.
The input data is orthogonal by design if causal is respected, but if the attention looks ahead this will fail
"""
torch.random.manual_seed(42)
device = torch.device("cuda")
multi_head = _get_multihead(
attention_name,
0.0,
0.0,
causal=True,
heads=heads,
device=device,
skip_output_projection=True,
)
k = (
torch.tril(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
q = (
torch.triu(torch.ones((SEQ, SEQ), device=device), diagonal=0)
.unsqueeze(0)
.expand(1, -1, -1)
)
v = (
torch.arange(SEQ, device=device)
.float()
.unsqueeze(0)
.unsqueeze(-1)
.expand(1, -1, SEQ)
)
    # Make sure that we don't project, to keep the embeddings orthogonal
multi_head.attention.requires_input_projection = False
res = multi_head(query=q, key=k, value=v).squeeze(0)
# Consolidate along the embedding, if causal was respected the amplitude should be sorted already
res_sum = torch.sum(res, dim=1).cpu()
assert torch.allclose(torch.sort(res_sum)[1], torch.arange(SEQ)) or torch.allclose(
torch.sort(res_sum, descending=True)[1], torch.arange(SEQ)
), res_sum |
LSE can be padded, let's remove the padding | def _block_diag_reshape_lse(
lse: torch.Tensor, q_seqinfo: fmha.attn_bias._SeqLenInfo
) -> torch.Tensor:
"""LSE can be padded, let's remove the padding"""
parts = []
for slice, (start, end) in zip(lse.unbind(0), q_seqinfo.intervals()):
parts.append(slice[:, : end - start])
return torch.cat(parts, dim=1).unsqueeze(1) |
vectorized implementation of scipy.stats.binom_test
this makes our tests much faster
reference: https://github.com/scipy/scipy/blob/v1.8.0/scipy/stats/_morestats.py#L2609-L2702 | def _vec_binom_test(x, n, p):
"""
vectorized implementation of scipy.stats.binom_test
this makes our tests much faster
reference: https://github.com/scipy/scipy/blob/v1.8.0/scipy/stats/_morestats.py#L2609-L2702
"""
import numpy as np
from scipy.stats import distributions
x = np.atleast_1d(x)
d = distributions.binom.pmf(x, n, p)[:, None]
rerr = 1 + 1e-7
# x < p * n case
i = np.arange(np.ceil(p * n), n + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=1)
pval1 = distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)
# other case
i = np.arange(np.floor(p * n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=1)
pval2 = distributions.binom.cdf(y - 1, n, p) + distributions.binom.sf(x - 1, n, p)
pval = np.where(x < p * n, pval1, pval2)
pval = np.minimum(1.0, pval)
return pval |
IMPORTANT:
This is the example in the doc for `BlockDiagonalMask`.
If this example needs to be updated, please also update the doc | def test_attn_bias_blockdiag_doc() -> None:
"""IMPORTANT:
This is the example in the doc for `BlockDiagonalMask`.
If this example needs to be updated, please also update the doc
"""
import torch
from xformers.ops import fmha
if torch.version.hip:
pytest.skip("backward pass/gradience is not yet supported by ck-tiled fmha!")
K = 16
dtype = torch.float16
device = "cuda"
list_x = [
torch.randn([1, 3, 1, K], dtype=dtype, device=device),
torch.randn([1, 6, 1, K], dtype=dtype, device=device),
torch.randn([1, 2, 1, K], dtype=dtype, device=device),
]
attn_bias, x = fmha.BlockDiagonalMask.from_tensor_list(list_x)
linear = torch.nn.Linear(K, K * 3).to(device=device, dtype=dtype) # type: ignore
q, k, v = linear(x).reshape([1, -1, 1, 3, K]).unbind(-2)
out = fmha.memory_efficient_attention(q, k, v, attn_bias=attn_bias)
list_out = attn_bias.split(out)
assert tuple(list_out[0].shape) == (1, 3, 1, K) |
This tests some internals of the cutlassB kernel
We test the iteration across blocks of [queries, keys] to ensure
that we correctly:
* Iterate over all the blocks that should be iterated
* Do *not* iterate over blocks that are completely masked out
* Correctly compute the number of parallel blocks that will compute
the same block of dQ
.. and we test this across variable causal masks+local attention combinations | def test_cutlassB_iter_order(
dtype,
cc: int,
maxK: int,
num_queries: int,
num_keys: int,
custom_mask_type,
window_size,
) -> None:
"""
This tests some internals of the cutlassB kernel
We test the iteration across blocks of [queries, keys] to ensure
that we correctly:
* Iterate over all the blocks that should be iterated
* Do *not* iterate over blocks that are completely masked out
* Correctly compute the number of parallel blocks that will compute
the same block of dQ
.. and we test this across variable causal masks+local attention combinations
"""
if (
window_size > 0
and custom_mask_type == fmha.cutlass._CustomMaskType.NoCustomMask
):
pytest.skip("LocalAttention is only supported for causal")
get_iteration_data = partial(
torch.ops.xformers._cutlassB_iteration_data,
dtype=dtype,
cc=cc,
maxK=maxK,
num_queries=num_queries,
num_keys=num_keys,
custom_mask_type=custom_mask_type,
window_size=window_size,
)
bias = torch.zeros([num_queries, num_keys], dtype=torch.float32)
if custom_mask_type != fmha.cutlass._CustomMaskType.NoCustomMask:
bias = fmha.attn_bias._materialize_causal_mask(
(num_queries, num_keys),
dtype=torch.float32,
device="cpu",
window_size=None if window_size == 0 else window_size,
from_bottomright=(
custom_mask_type == fmha.cutlass._CustomMaskType.CausalFromBottomRight
),
)
block_queries, block_keys = get_iteration_data()[:2]
mask_pooled = (
F.max_pool2d(bias.unsqueeze(0), (block_queries, block_keys), ceil_mode=True)
== 0
).int()[0]
attn_computed = torch.zeros_like(mask_pooled)
for key_start in range(0, num_keys, block_keys):
it = 0
new_key_start = key_start
new_query_start = get_iteration_data(key_start=key_start)[2]
try:
expected_first_query = (
mask_pooled[:, key_start // block_keys].tolist().index(1)
* block_queries
)
assert (
new_query_start == expected_first_query
), f"Wrong first query for K={key_start}: {new_query_start} (expected {expected_first_query})"
except ValueError: # Nothing to compute in this column
pass
while new_key_start == key_start and new_query_start < num_queries:
query_start = new_query_start
attn_computed[query_start // block_queries, key_start // block_keys] += 1
# print(f"Compute [{query_start}, {key_start}]")
# Is there something to compute here?
assert mask_pooled[
query_start // block_queries, key_start // block_keys
].item(), "Computing a block that is not needed!"
new_query_start, new_key_start = get_iteration_data(
key_start=key_start, query_start=query_start
)[3:5]
it += 1
assert it < num_queries, ""
assert (attn_computed == mask_pooled)[
:, key_start // block_keys
].all(), "some blocks were not computed!"
# Now check that the number returned by `getNumParallelBlocksForQuery` is correct
for query_start in range(0, num_queries, block_queries):
num_parallel_blocks = get_iteration_data(
query_start=query_start, num_splits_key=num_keys
)[5]
num_actual = mask_pooled[query_start // block_queries].sum().item()
assert num_parallel_blocks == num_actual |
Merging the same attention twice shouldn't change anything.
This also tests the shape of the lse output of each permitted op. | def test_merge_attentions_nobias(
write_lse: bool,
stack_inputs: bool,
op: Type[AttentionFwOpBase],
G: Optional[int],
H: int,
):
"""
Merging the same attention twice shouldn't change anything.
This also tests the shape of the lse output of each permitted op.
"""
B, M, Mq, K = 13, 5, 3, 128
if op is None or torch.bfloat16 in op.SUPPORTED_DTYPES:
dtype = torch.bfloat16
else:
dtype = next(iter(op.SUPPORTED_DTYPES))
if G is None:
q = 3 * torch.rand(B, Mq, H, K, dtype=dtype, device="cuda")
k = (3 * torch.rand(B, M, 1, K, dtype=dtype, device="cuda")).expand(B, M, H, K)
v = (3 * torch.rand(B, M, 1, K, dtype=dtype, device="cuda")).expand(B, M, H, K)
else:
q = 3 * torch.rand(B, Mq, G, H, K, dtype=dtype, device="cuda")
k = (3 * torch.rand(B, M, G, 1, K, dtype=dtype, device="cuda")).expand(
B, M, G, H, K
)
v = (3 * torch.rand(B, M, G, 1, K, dtype=dtype, device="cuda")).expand(
B, M, G, H, K
)
out1, lse1 = fmha.memory_efficient_attention_partial(q, k, v, op=op)
assert out1.shape == q.shape
M_ceil = lse1.shape[-1]
assert M_ceil >= Mq
assert lse1.shape == (B, H, M_ceil) if G is None else (B, G, H, M_ceil)
lse1 = lse1[..., :Mq]
attn_chunks = [out1, out1]
lse_chunks = [lse1, lse1]
attn_chunks_ = torch.stack(attn_chunks) if stack_inputs else attn_chunks
lse_chunks_ = torch.stack(lse_chunks) if stack_inputs else lse_chunks
out, lse = fmha.merge_attentions(attn_chunks_, lse_chunks_, write_lse=write_lse) # type: ignore
assert out.shape == out1.shape
assert_allclose(out1, out, rtol=1e-3, atol=1e-3, msg="out")
if write_lse:
assert lse is not None
assert lse.shape[:-1] == lse1.shape[:-1]
assert_allclose(
lse1[..., :Mq] + math.log(2), lse[..., :Mq], rtol=1e-3, atol=1e-3, msg="lse"
)
else:
assert lse is None |
Compute decoding attention on chunks of K/V and merge them together.
Compare with computing attention on the whole K/V. | def test_merge_attentions_decoding(
dtype: torch.dtype,
op: Type[AttentionFwOpBase],
num_queries: int,
bmghk: bool,
stack_inputs: bool,
):
"""
Compute decoding attention on chunks of K/V and merge them together.
Compare with computing attention on the whole K/V.
"""
MAX_T = 8192
B = 128
N_H_L = 8
D_H = 128
G = 2 if bmghk else 1
torch.manual_seed(1)
output_dtype = torch.float32 if op.SUPPORTS_OUTPUT_DTYPE else None
num_chunks = 10
chunk_starts = sorted(
torch.randint(low=1, high=MAX_T // 2, size=(num_chunks,)).tolist()
)
chunk_starts[0] = 0
chunk_starts.append(MAX_T)
# We construct sequences so that even the last chunk has a non-empty part of every sequence
# as long as the number of queries.
# Otherwise the corresponding LSE will be -inf and that'll propagate to the whole sum.
# It is possible to teach the kernel to ignore infinite LSEs, but in practical use cases
# of merging attention, e.g. a batch of sequences with a common prefix, this condition should be satisfied.
k_lens = torch.randint(
low=chunk_starts[-2] + num_queries, high=MAX_T, size=(B,)
).tolist()
q_lens = [num_queries] * B
B_T = num_queries * B
q = torch.randn((1, B_T, G, N_H_L, D_H), dtype=dtype, device="cuda")
k = torch.randn((B, MAX_T, G, 1, D_H), dtype=dtype, device="cuda")
v = torch.randn_like(k)
if not bmghk:
q = q[:, :, 0]
# Compute per-chunk attention
chunks_output = []
for i in range(num_chunks):
chunk_start, chunk_end = chunk_starts[i], chunk_starts[i + 1]
k_chunk = k[:, chunk_start:chunk_end, ...]
v_chunk = v[:, chunk_start:chunk_end, ...]
axk = k_chunk.reshape(-1, G, 1, D_H).expand(1, -1, G, N_H_L, D_H)
axv = v_chunk.reshape(-1, G, 1, D_H).expand(1, -1, G, N_H_L, D_H)
if not bmghk:
axk = axk[:, :, 0]
axv = axv[:, :, 0]
bias_type = fmha.attn_bias.BlockDiagonalPaddedKeysMask
if i + 1 == num_chunks:
bias_type = fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask
attn_bias = bias_type.from_seqlens(
q_seqlen=q_lens,
kv_padding=chunk_end - chunk_start,
kv_seqlen=[max(min(x, chunk_end) - chunk_start, 0) for x in k_lens],
)
attn_chunk, lse_chunk = fmha.memory_efficient_attention_partial(
q,
axk,
axv,
attn_bias,
op=op,
output_dtype=output_dtype,
)
if bmghk:
assert attn_chunk.shape == (1, B_T, G, N_H_L, D_H)
assert lse_chunk.shape == (1, G, N_H_L, B_T)
else:
assert attn_chunk.shape == (1, B_T, N_H_L, D_H)
assert lse_chunk.shape == (1, N_H_L, B_T)
chunks_output.append((attn_chunk, lse_chunk))
# Merge attention from all chunks
attn_split = [attn_chunk for attn_chunk, _ in chunks_output]
lse_split = [lse_chunk for _, lse_chunk in chunks_output]
attn_split_ = torch.stack(attn_split) if stack_inputs else attn_split
lse_split_ = torch.stack(lse_split) if stack_inputs else lse_split
attn_out, lse_out = fmha.merge_attentions(
attn_split_, lse_split_, output_dtype=dtype # type: ignore
)
assert lse_out is not None
# Compute attention on the full K/V
attn_bias = fmha.attn_bias.BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
q_seqlen=q_lens,
kv_padding=MAX_T,
kv_seqlen=k_lens,
)
axk = k.view(1, -1, G, 1, D_H).expand(1, -1, G, N_H_L, D_H)
axv = v.view(1, -1, G, 1, D_H).expand(1, -1, G, N_H_L, D_H)
if not bmghk:
axk = axk[:, :, 0]
axv = axv[:, :, 0]
attn_full, lse_full = fmha.memory_efficient_attention_forward_requires_grad(
q,
axk,
axv,
attn_bias,
op=op,
output_dtype=output_dtype,
)
assert_allclose(
lse_out.to(lse_full.dtype), lse_full, rtol=1e-3, atol=1e-3, msg="lse"
)
assert_allclose(
attn_out.to(attn_full.dtype), attn_full, rtol=1e-3, atol=1e-3, msg="out"
)
attn_full2 = fmha.memory_efficient_attention_forward(
q,
axk,
axv,
attn_bias,
op=op,
output_dtype=output_dtype,
)
assert_allclose(attn_full2, attn_full, rtol=1e-3, atol=1e-3, msg="out2") |
attn_split: [split_k, B, M, (G,) H, Kq]
lse_split: [split_k, B, (G,) H, M] | def _merge_attentions_ref(attn_split, lse_split):
"""
attn_split: [split_k, B, M, (G,) H, Kq]
lse_split: [split_k, B, (G,) H, M]
"""
is_bmghk = len(attn_split.shape) == 6
if not is_bmghk:
attn_split = attn_split.unsqueeze(3)
lse_split = lse_split.unsqueeze(2)
lse_split = lse_split[..., None].moveaxis(4, 2) # [split_k, B, M, G, H, 1]
lse_max, _ = torch.max(lse_split, dim=0) # [B, M, G, H, 1]
sumexp_normalized = torch.exp(lse_split - lse_max) # [split_k, B, M, G, H, 1]
denominator = sumexp_normalized.sum(dim=0) # [B, M, G, H, 1]
numerator = (sumexp_normalized * attn_split).sum(dim=0) # [B, M, G, H, K]
attn_out = numerator / denominator # [B, M_ceil, G, H, Kq]
lse_out = lse_max + torch.log(denominator)
lse_out = lse_out.squeeze(4).permute(0, 2, 3, 1) # [B, G, H, M]
if not is_bmghk:
attn_out = attn_out.squeeze(2)
lse_out = lse_out.squeeze(1)
return attn_out, lse_out |
Simple reference calculation of RoPE applied to one tensor
Args:
x: input, shape (B, M, H, K).
seqpos: gives the position of each sequence element in x in its sequence
(shape (M,)). | def _slow_rope(
x: torch.Tensor,
*,
seqpos: Optional[torch.Tensor] = None,
theta=10000,
adjacents: bool = True,
):
"""
    Simple reference calculation of RoPE applied to one tensor
Args:
x: input, shape (B, M, H, K).
seqpos: gives the position of each sequence element in x in its sequence
(shape (M,)).
"""
x_shape = x.shape
dim = x_shape[-1]
seq_dim = 1
M = x_shape[seq_dim]
assert dim % 2 == 0
if seqpos is None:
seqpos = torch.arange(M, device=x.device)
power = torch.arange(0, dim, 2, device=x.device)[: (dim // 2)].float() / dim
freqs = 1.0 / (theta**power)
all_freqs = torch.outer(seqpos, freqs)
freqs_cis = torch.polar(torch.ones_like(all_freqs), all_freqs) # complex64
for _ in range(x.ndim - seq_dim - 2):
freqs_cis = freqs_cis[:, None]
if adjacents:
x_reshaped = x.float().unflatten(-1, (-1, 2))
x_ = torch.view_as_complex(x_reshaped)
x_out = torch.view_as_real(x_ * freqs_cis)
else:
x_reshaped = x.float().unflatten(-1, (2, -1)).transpose(-1, -2).contiguous()
x_ = torch.view_as_complex(x_reshaped)
x_out = torch.view_as_real(x_ * freqs_cis)
x_out = x_out.transpose(-1, -2)
return x_out.flatten(-2).type_as(x) |
More flexible (currently unused) version of _slow_rope;
allows varying dtypes.
x: torch.Tensor,
*,
seqpos: Optional[torch.Tensor] = None,
theta=10000,
adjacents: bool = True,
):
"""
    More flexible (currently unused) version of _slow_rope;
    allows varying dtypes.
"""
internal_dtype = torch.float64
dim = x.shape[-1]
seq_dim = 1
M = x.shape[seq_dim]
assert dim % 2 == 0
if seqpos is None:
seqpos = torch.arange(M, device=x.device)
power = (
torch.arange(0, dim, 2, device=x.device)[: (dim // 2)].to(internal_dtype) / dim
)
# freqs = 1.0 / (theta**power)
freqs = theta**-power
f = torch.outer(seqpos, freqs)
for _ in range(x.ndim - seq_dim - 2):
f = f[:, None]
if adjacents:
x1, x2 = x.to(internal_dtype).unflatten(-1, (-1, 2)).unbind(-1)
y1 = x1 * f.cos() - x2 * f.sin()
y2 = x1 * f.sin() + x2 * f.cos()
x_out = torch.stack([y1, y2], -1)
else:
x1, x2 = x.to(internal_dtype).unflatten(-1, (2, -1)).unbind(-2)
y1 = x1 * f.cos() - x2 * f.sin()
y2 = x1 * f.sin() + x2 * f.cos()
x_out = torch.stack([y1, y2], -2)
return x_out.flatten(-2).type_as(x) |
Improved version of
```
assert torch.allclose(out, ref)
```
Except that we provide a useful error message, and also compare
to the output of the f32 calculation. | def assert_allclose(
# The output of the tested function
out: torch.Tensor,
# The output of the reference implementation
ref: torch.Tensor,
# The output of the reference implementation in f32
ref32: Optional[torch.Tensor] = None,
msg: str = "failed",
atol: Optional[float] = None,
rtol: Optional[float] = None,
) -> None:
"""
Improved version of
```
assert torch.allclose(out, ref)
```
    Except that we provide a useful error message, and also compare
to the output of the f32 calculation.
"""
out = out.float()
ref = ref.float()
if atol is None:
atol = 1e-8
if rtol is None:
rtol = 1e-5
assert out.shape == ref.shape
compare_to = ref32 if ref32 is not None else ref
assert out.shape == compare_to.shape
if torch.allclose(out, ref, rtol=rtol, atol=atol) or (
ref32 is not None and torch.allclose(out, ref32, rtol=rtol, atol=atol)
):
return
flatten_diff = ((out - compare_to).abs() - atol - compare_to.abs() * rtol).flatten()
max_pos = flatten_diff.argmax()
if ref32 is not None:
flatten_diff_vsf32 = ((ref - ref32).abs() - atol - ref32.abs() * rtol).flatten()
max_pos_vsf32 = flatten_diff_vsf32.argmax()
assert False, (
f"{msg}: "
f"out={out.flatten()[max_pos]} and ref32={compare_to.flatten()[max_pos]} (diff={flatten_diff[max_pos]} > 0)"
f"/ atol={atol}, rtol={rtol}.\n"
f"NOTE: ref vs ref_f32:\n"
f"ref={ref.flatten()[max_pos_vsf32]} and ref32={ref32.flatten()[max_pos_vsf32]} "
f"(diff={flatten_diff_vsf32[max_pos_vsf32]})"
)
else:
assert False, (
f"{msg}: "
f"out={out.flatten()[max_pos]} and ref={compare_to.flatten()[max_pos]} (diff={flatten_diff[max_pos]} > 0)"
f"/ atol={atol}, rtol={rtol}"
) |
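For illustration, a hypothetical call pattern for this helper: a low-precision kernel output checked against a reference in the same dtype plus a float32 reference. The shapes, tolerances, and the stand-in "kernel" below are made up for the sketch.

```
import torch

a = torch.randn(64, 32, dtype=torch.bfloat16)
b = torch.randn(32, 16, dtype=torch.bfloat16)
out = a @ b                    # stands in for the kernel under test
ref = a @ b                    # reference computed in the same dtype
ref32 = a.float() @ b.float()  # reference computed in float32
assert_allclose(out, ref, ref32, msg="bf16 matmul mismatch", atol=1e-2, rtol=1e-2)
```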
Produce lhs, rhs and reference output tensors
To dodge numerical accuracy differences between our kernels and PyTorch's
ones, we avoid random values and construct matrices whose product is an
exact mathematical computation, specifically: the remainder!
We do it by having the i-th row of lhs and the j-th column of rhs be like:
* lhs: i times "1", followed by "0"
* rhs: j-1 times "1", followed by "-(j-1)", then repeated
The running sum of their pointwise product will thus be:
1, 2, 3, ..., j-1, 0, 1, 2, 3, ... and so on
And the final value will be the remainder of i divided by j.
If K is smaller than M and/or N, this function also takes care of repeating
some rows and/or columns in order to "fill" M and/or N. Similarly, if the
precision of the dtype is too low to store the result without losses, the
function will only use small-enough values, and repeat them as needed.
Finally, the function permutes the rows and columns, in order to avoid a
predictable block structure. | def make_operands(m, n, k, *, dtype):
"""Produce lhs, rhs and reference output tensors
To dodge numerical accuracy differences between our kernels and PyTorch's
ones, we avoid random values and construct matrices whose product is an
exact mathematical computation, specifically: the remainder!
    We do it by having the i-th row of lhs and the j-th column of rhs be like:
* lhs: i times "1", followed by "0"
* rhs: j-1 times "1", followed by "-(j-1)", then repeated
The running sum of their pointwise product will thus be:
1, 2, 3, ..., j-1, 0, 1, 2, 3, ... and so on
    And the final value will be the remainder of i divided by j.
If K is smaller than M and/or N, this function also takes care of repeating
    some rows and/or columns in order to "fill" M and/or N. Similarly, if the
precision of the dtype is too low to store the result without losses, the
function will only use small-enough values, and repeat them as needed.
Finally, the function permutes the rows and columns, in order to avoid a
predictable block structure.
"""
max_value = min(k, int(1 / torch.finfo(dtype).eps) * 2)
m_perm = torch.randperm(m)
n_perm = torch.randperm(n)
num_reps_m = ceil_of_ratio(m, max_value)
lhs = (
torch.ones((min(m, max_value), k), dtype=dtype)
.tril()
.repeat([num_reps_m, 1])[m_perm, :]
)
assert lhs.shape == (m, k)
num_reps_n = ceil_of_ratio(n, max_value)
rhs = torch.ones((k, min(n, max_value)), dtype=dtype)
for i in range(2, min(n, max_value) + 2):
rhs[:, i - 2][i - 1 :: i] = -i + 1
rhs = rhs.repeat([1, num_reps_n])[:, n_perm]
assert rhs.shape == (k, n)
lhs_idxs = torch.arange(1, min(m, max_value) + 1).repeat([num_reps_m])[m_perm, None]
rhs_idxs = torch.arange(2, min(n, max_value) + 2).repeat([num_reps_n])[None, n_perm]
out = torch.remainder(lhs_idxs, rhs_idxs).to(dtype)
assert out.shape == (m, n)
return lhs, rhs, out |
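A quick sanity check of the construction described in the docstring (a sketch, assuming `make_operands` and its `ceil_of_ratio` helper are importable from the test module): for float32 and these sizes every entry and partial sum is a small integer, so the product should reproduce the reference output exactly.

```
import torch

lhs, rhs, out = make_operands(64, 48, 32, dtype=torch.float32)
# All entries and partial sums are small integers, so no rounding occurs.
assert torch.equal(lhs @ rhs, out)
assert out.max().item() < 34  # remainders are bounded by the largest divisor
```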
Check some basic dropout properties | def test_dropout(shape, amp, bias, p):
"""
Check some basic dropout properties
"""
torch.random.manual_seed(0)
torch.cuda.manual_seed_all(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.normal(0, 1, size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
tol = 1e-2 if amp else 1e-5 # AMP rounding causes issues, 1e-5 is the default
# Check that 0 means no dropout
y = triton_dropout(x, p=0, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert torch.allclose(x_ref, y, rtol=tol), f"{x[x>y]}"
# Check that 1 means drop all
y = triton_dropout(x, p=1, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert torch.allclose(torch.zeros_like(y), y, rtol=tol)
        # Check that p=0.99 drops almost everything (output differs from the input)
y = triton_dropout(x, p=0.99, bias=b)
x_ref = (x + b if bias else x).to(y.dtype)
assert not torch.allclose(x_ref, y, rtol=tol)
# Check that the drops are different for every row (could catch broken seeds per row)
y = triton_dropout(x, p=0.5)
y = y.flatten(0, 1) if y.ndim == 3 else y
assert not torch.sum(torch.eq(y[0, :] == 0.0, y[1, :] == 0.0)) == y.shape[1]
# Check that the drops are different over time, for the same line
y_a = triton_dropout(x, p=0.5)
y_b = triton_dropout(x, p=0.5)
y_a = y_a.flatten(0, 1) if y_a.ndim == 3 else y_a
y_b = y_b.flatten(0, 1) if y_b.ndim == 3 else y_b
assert (
not torch.sum(torch.eq(y_a[0, :] == 0.0, y_b[0, :] == 0.0)).item()
== y.shape[1]
)
# Check that the drop probability is about right
y = triton_dropout(x, p=p)
drop_p = (y.numel() - y.count_nonzero()) / y.numel()
assert abs(drop_p - p) < 0.02
# Check that the same seeds lead to the same dropout
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_1 = triton_dropout(x, p=0.5)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
y_2 = triton_dropout(x, p=0.5)
torch.testing.assert_close(y_1, y_2) |
Check that Triton dropout (with fused bias and activation) matches the PyTorch reference | def test_dropout_parity(shape, amp, bias, activation, p):
"""
    Check that Triton dropout (with fused bias and activation) matches the PyTorch reference
"""
torch.random.manual_seed(0)
x = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
torch.random.manual_seed(0)
x_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
b_ = (
torch.ones(size=(shape[-1],), device="cuda", requires_grad=True)
if bias
else None
)
with autocast(enabled=amp):
torch_activation = build_activation(activation)
res_torch = torch.nn.functional.dropout(
torch_activation(x + b if b is not None else x), p=p
)
loss_torch = torch.sum(res_torch)
res_triton = triton_dropout(x=x_, p=p, bias=b_, activation=activation)
loss_triton = torch.sum(res_triton)
if p < 0.01:
# Check the FW pass
assert torch.allclose(
loss_torch, loss_triton, rtol=0.01
), f"{loss_torch} - {loss_triton}"
# Check the gradients
loss_torch.backward()
loss_triton.backward()
# - gradients wrt inputs
assert torch.allclose(
torch.norm(x.grad), torch.norm(x_.grad), rtol=0.01
), f"{x.grad}\n{x_.grad}"
# - gradients wrt bias
if bias:
assert torch.allclose(
torch.norm(b.grad), torch.norm(b_.grad), rtol=0.01
), f"{b.grad.norm()} - {b_.grad.norm()}" |
Check that the matrix multiply kernel and PyTorch's give the same results | def test_fused_matmul(shape, dtype):
"""Check that the matrix multiply kernel and Pytorch's give the same results"""
# TODO: fix or remove this
pytest.skip("This is broken")
torch.random.manual_seed(0)
# Raw fused matrix multiply first, to catch gross errors
a = torch.normal(0, 1, size=(shape[-2], shape[-1]), dtype=dtype, device="cuda")
b = torch.normal(0, 1, size=(shape[-1], shape[-2]), dtype=dtype, device="cuda")
# Test that not passing any bias is fine
res_torch = a @ b
res_triton, _ = fused_matmul(
a, b.transpose(0, 1).contiguous(), bias=None, activation=0
)
torch.testing.assert_close(res_torch, res_triton)
# Now test with a real FMA
c = -torch.randn((shape[-2],), dtype=dtype, device="cuda")
res_torch = torch.addmm(c, a, b)
res_triton, _ = fused_matmul(a, b.transpose(1, 0).contiguous(), c)
torch.testing.assert_close(
res_torch,
res_triton,
atol=1e-3,
rtol=1e-3,
msg="Fused matmul broken",
)
# Now check that adding an activation to the mix still produces valid results
# NOTE: SquaredReLU fails, some outlier representation issue but the eyeballed results look reasonable
# could be due to a different accumulation out of the box (tf32 for instance)
for activation in filter(
lambda x: x not in (Activation.SquaredReLU, Activation.StarReLU), Activation
):
torch_activation = build_activation(activation.value)
res_torch = torch_activation(torch.addmm(c, a, b))
triton_activation_index = get_triton_activation_index(activation)
print(activation, triton_activation_index)
res_triton, _ = fused_matmul(
a, b.transpose(1, 0).contiguous(), c, triton_activation_index
)
torch.testing.assert_close(
res_torch,
res_triton,
atol=1e-3,
rtol=1e-3,
msg=f"Fused matmul broken with activation {activation}",
) |
Check that PyTorch and fused linear layers give the same result | def test_fused_linear_parity(shape, activation: Activation, bias: bool, amp: bool):
"""Check that PyTorch and fused linear layers give the same result"""
# TODO: fix or remove this
pytest.skip("This is broken")
torch.random.manual_seed(0)
# Instantiate pytorch and fused layers, same initialization
X = torch.normal(0, 1, size=shape, device="cuda")
X.requires_grad_()
torch_linear = torch.nn.Linear(shape[-1], shape[-1] // 2, bias=bias).to("cuda")
torch_sequence = torch.nn.Sequential(torch_linear, build_activation(activation))
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda")
X_.requires_grad_()
# pyre-ignore[16]: TODO(T101400990): Pyre did not recognize the
# `FusedLinear` import.
triton_fused_linear = FusedLinear(
shape[-1], shape[-1] // 2, bias=bias, activation=activation
).to("cuda")
# Now check parity
torch_linear.train()
triton_fused_linear.train()
torch_linear.zero_grad()
triton_fused_linear.zero_grad()
torch.testing.assert_close(
triton_fused_linear.weight,
torch_linear.weight,
atol=1e-3,
rtol=1e-3,
msg="Broken test setup",
)
torch.testing.assert_close(X, X_, atol=1e-3, rtol=1e-3, msg="Broken test setup")
with autocast(enabled=amp):
y_torch = torch_sequence(X)
y_triton = triton_fused_linear(X_)
grad = torch.randn_like(y_torch)
# Check that BW also gives the same result
y_torch.backward(grad)
y_triton.backward(grad)
torch.testing.assert_close(X, X_, atol=1e-3, rtol=1e-3)
# Input grad being correct checks both the loss + some of the backward pass
assert X.grad is not None and X_.grad is not None
torch.testing.assert_close(X.grad, X_.grad, atol=1e-3, rtol=1e-3)
# Check that the linear layer bias are also properly trainable
if bias:
assert (
triton_fused_linear.bias is not None
and triton_fused_linear.bias.grad is not None
)
assert torch_linear.bias is not None and torch_linear.bias.grad is not None
torch.testing.assert_close(
torch_linear.bias.grad,
triton_fused_linear.bias.grad,
atol=1e-3,
rtol=1e-3,
)
# Check that the linear layer weights are also properly trainable
assert (
torch_linear.weight.grad is not None
and triton_fused_linear.weight.grad is not None
)
torch.testing.assert_close(
torch_linear.weight.grad,
triton_fused_linear.weight.grad,
atol=1e-3,
rtol=1e-3,
) |
Check that PyTorch and Triton layer norm give the same result | def test_layernorm_parity(shape, amp):
"""Check that PyTorch and Triton softmax give the same result"""
# Get the same inputs
torch.random.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
torch.random.manual_seed(0)
X_ = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True)
eps = 1e-4
# Initialize the two layers, weights are 1 and 0 by default, no randomness
torch_layernorm = torch.nn.LayerNorm(X.shape[-1], eps=eps).to("cuda")
triton_layernorm = FusedLayerNorm(X.shape[-1], affine=True, eps=eps).to("cuda")
with autocast(enabled=amp):
assert torch.allclose(X, X_) # sanity checking, else all hell breaks loose
# Check the forward pass
y_torch = torch_layernorm(X)
y_triton = triton_layernorm(X_)
assert torch.allclose(
y_torch.norm(), y_triton.norm(), atol=1e-3
), f"{torch.norm(y_torch)} vs. {torch.norm(y_triton)}"
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton)
loss_triton.backward()
print(torch.norm(y_torch), torch.norm(y_triton))
print(y_torch[0, :])
print(y_triton[0, :])
# There are 3 items to check:
# - gradient on the inputs
assert torch.allclose(
X.grad, X_.grad
), f"Inputs grad mismatch: {torch.norm(X.grad)} vs. {torch.norm(X_.grad)}"
# - gradient on the layernorm weight
assert torch.allclose(
torch_layernorm.weight.grad, triton_layernorm.weight.grad, atol=1e-3
), (
f"Weight grad mismatch: {torch.norm(torch_layernorm.weight.grad)} vs."
+ f" {torch.norm(triton_layernorm.weight.grad)}"
)
# - gradient on the layernorm bias
assert torch.allclose(
torch_layernorm.bias.grad, triton_layernorm.bias.grad, atol=1e-3
), (
f"Bias grad mismatch: {torch.norm(torch_layernorm.bias.grad)} vs."
+ f" {torch.norm(triton_layernorm.bias.grad)}"
) |