| text (stringlengths 5 to 22M) | id (stringlengths 12 to 177) | metadata (dict) | __index_level_0__ (int64, 0 to 1.37k) |
---|---|---|---|
resource "azurerm_resource_group" "rg" {
location = data.azurerm_resource_group.rg.location
name = local.resource_group_name
tags = merge(
local.tre_shared_service_tags,
{
project = "Azure Trusted Research Environment",
source = "https://github.com/microsoft/AzureTRE/"
},
)
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_databricks_workspace" "databricks" {
name = local.databricks_workspace_name
resource_group_name = local.resource_group_name
location = azurerm_resource_group.rg.location
sku = "premium"
managed_resource_group_name = local.managed_resource_group_name
infrastructure_encryption_enabled = true
public_network_access_enabled = false
network_security_group_rules_required = "NoAzureDatabricksRules"
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags] }
custom_parameters {
no_public_ip = true
public_subnet_name = azurerm_subnet.host.name
private_subnet_name = azurerm_subnet.container.name
virtual_network_id = azurerm_virtual_network.ws.id
public_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.host.id
private_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.container.id
}
depends_on = [
azurerm_subnet_network_security_group_association.host,
azurerm_subnet_network_security_group_association.container
]
}
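The custom_parameters block above references a virtual network, two subnets, and NSG associations defined elsewhere in this bundle's Terraform. A minimal sketch of what those resources could look like, assuming standard azurerm resources; the names, address ranges, and NSG below are illustrative only and not the bundle's actual network.tf:
resource "azurerm_virtual_network" "ws" {
  name                = "vnet-databricks-example" # illustrative name
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
  address_space       = ["10.1.0.0/22"] # illustrative range
}
resource "azurerm_network_security_group" "nsg" {
  name                = "nsg-databricks-example" # illustrative name
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
}
# The "host" (public) subnet; the "container" (private) subnet follows the same pattern.
resource "azurerm_subnet" "host" {
  name                 = "host-subnet" # illustrative name
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.ws.name
  address_prefixes     = ["10.1.0.0/24"] # illustrative range
  # Databricks VNet injection requires the subnets to be delegated to the service.
  delegation {
    name = "databricks-delegation"
    service_delegation {
      name = "Microsoft.Databricks/workspaces"
      actions = [
        "Microsoft.Network/virtualNetworks/subnets/join/action",
        "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action",
        "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action",
      ]
    }
  }
}
resource "azurerm_subnet_network_security_group_association" "host" {
  subnet_id                 = azurerm_subnet.host.id
  network_security_group_id = azurerm_network_security_group.nsg.id
}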
|
AzureTRE/templates/shared_services/databricks-auth/terraform/main.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/databricks-auth/terraform/main.tf",
"repo_id": "AzureTRE",
"token_count": 823
}
| 118 |
# Azure Provider source and version being used
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.53.0"
}
}
backend "azurerm" {}
}
provider "azurerm" {
features {}
}
|
AzureTRE/templates/shared_services/firewall/terraform/providers.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/firewall/terraform/providers.tf",
"repo_id": "AzureTRE",
"token_count": 99
}
| 119 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.53.0"
constraints = "3.53.0"
hashes = [
"h1:bK70LV1NldhodSm58cUpawKwdUL1A5AKKglAV2wZ/QY=",
"zh:078ece8318ad7d6c1cd2e5f2044188e74af63921b93223c7f8d477539fa91888",
"zh:1bdc98ff8c2d3f3e81a746762e03d39794b2f5c90dc478cdb23dcc3d3f9947b6",
"zh:20b51cfc0ffc4ff368e6eb2eaece0b6bb99ade09e4b91b3444b50e94fc54c119",
"zh:233eed91279a9473825ba02d66487388d66dfc719b7249112d085dece0c2b594",
"zh:397ac8194ecc2f8d34d42600d6bf9e20399b222170dc1443b5800db3135ebc99",
"zh:3af3a2d8485d6c1ffcd26848af9ab087dfcb6cb045cc624e51f4db5144b53a9c",
"zh:5d0b9a346b57cccc369e2076556274225ec7f1c9044a2503dcfd8c117cdc2f79",
"zh:6e762dcef4ba14985f93af5f3fd195c9ee7d27de8de3bebdeefe761e53e79bb9",
"zh:73f9be719aa867985b1744c1f4fab834d01eb2069ec7a78b3a1bfa87c8256a40",
"zh:756deed30c20ffc9b4756c239e1675d3693f7175851e5ef946948a8bfb0b7935",
"zh:c279f99902a45a5b88d25d609a73709d101af3ce71222efbab9d4706c8a538b4",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
constraints = "3.5.1"
hashes = [
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}
provider "registry.terraform.io/hashicorp/template" {
version = "2.2.0"
constraints = "2.2.0"
hashes = [
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
]
}
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 1932
}
| 120 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/azure/azapi" {
version = "1.1.0"
constraints = "1.1.0"
hashes = [
"h1:IR+AHCwfjl1c0baWwfOwZ6QZtHj41H2syTgHkJtAr/M=",
"zh:2a25df6325a49f9e821f0b02c7da86167fc19a3bac647cd1edf231300f29d077",
"zh:2b443a836a39724663fe455d4deee408ff3a2d9a8b86f8408aa7db2e8aa743f8",
"zh:364ed09ddfc50d9bed8d930f7de489cb654a9908feb139413a097823a50075fd",
"zh:523bc005f56ae785867d230d55c29f59db4b599dbc6c38b4d03ea55a79458916",
"zh:60ded375fdb305b60bcb4d9e596dbb222cab166bad1b4958199b05a72aaeacfd",
"zh:61e69c58642fead6814e511c872b7c0a6478ec6af4ab758b4512607d910ac078",
"zh:823b2154ae2262dabcbd11aac992e3cc29eae0f7baa96bee1e3e2fe1ece8730b",
"zh:870ea9cc24807ef5142e4cad0281dac7173f7b6bf818a79762b6c690d12d4c4b",
"zh:9094ae76ed66cb328a4f35bd18b9140fb6fc6859c2e46431ec73c018bcb58d96",
"zh:d89149cfd01cb70012459536b4d36490b58e43312440562e5910bd5160537858",
"zh:dba7ec06171ca062fc423ba5b4776a5600444e45e57f4d1cb043bdc3eee538b7",
"zh:ff5bd6883d9ac8334e043434246357a55107411e9a962856c1d17e47ee15ac37",
]
}
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.37.0"
constraints = "3.37.0"
hashes = [
"h1:83XTgyPKUKt706IjTLHo9HL0KN5m+DwmSKuVQv6dNb4=",
"zh:2a7bda0b7679d1c791c762103a22f333b544b6e6776c4177f33bafc9cc28c919",
"zh:49ff49670c349f918017315838a43ece09bf6f1bf7721b992f1cadbceb273c62",
"zh:55c9346d03380585e17616b79c4233b726d6fb9efa1921848834fc881e5d7d54",
"zh:5ab117b56a4236ea29926e9d95c27d7bf8ae6706d0fffb76c0b1bfe67bf3a78e",
"zh:5cfc086d5d56308edb3e68aac5f8a448ddc6e56541be7b152ae886399e9b2c69",
"zh:7a8929ed38152aac6652711f32193c8582bc996f8fa73879a3ac7a9bf88d2460",
"zh:895294e90a37f719975fcd2269b95e973147e48ec0ebb9c2fe472bc93531b49c",
"zh:8baa5e2b6e5b02df5b45d253a3aea93f22619920cf9577290d682b59a6d5664b",
"zh:b146a732c7909238c10d216b92a35092be4f72a0509a4c6742cc3245bf3b3bf3",
"zh:cedef898ccd512a6519eae3dff7eb0d581d2c3dad8e0001992da16ad1d7fded8",
"zh:f016d9ba94ea88476883b4d63cff88a0225974e0a8b8c3e8555f73c5de6f7119",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/external" {
version = "2.2.2"
constraints = "2.2.2"
hashes = [
"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=",
"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca",
"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28",
"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b",
"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327",
"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955",
"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb",
"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0",
"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a",
"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372",
"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.4.3"
hashes = [
"h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=",
"zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752",
"zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b",
"zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3",
"zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5",
"zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda",
"zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6",
"zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1",
"zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d",
"zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8",
"zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93",
]
}
|
AzureTRE/templates/workspace_services/azureml/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 2645
}
| 121 |
---
schemaVersion: 1.0.0
name: tre-service-databricks
version: 1.0.3
description: "An Azure TRE service for Azure Databricks."
registry: azuretre
dockerfile: Dockerfile.tmpl
credentials:
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: id
type: string
description: "Resource ID"
- name: address_space
type: string
- name: is_exposed_externally
type: boolean
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
type: string
default: "public"
outputs:
- name: databricks_workspace_name
type: string
applyTo:
- install
- upgrade
- name: connection_uri
type: string
applyTo:
- install
- upgrade
- name: databricks_storage_account_name
type: string
applyTo:
- install
- upgrade
- name: dbfs_blob_storage_domain
type: string
applyTo:
- install
- upgrade
- name: metastore_addresses
type: string
applyTo:
- install
- upgrade
- name: event_hub_endpoint_addresses
type: string
applyTo:
- install
- upgrade
- name: log_blob_storage_domains
type: string
applyTo:
- install
- upgrade
- name: artifact_blob_storage_domains
type: string
applyTo:
- install
- upgrade
- name: workspace_address_spaces
type: string
applyTo:
- install
- upgrade
- name: databricks_address_prefixes
type: string
applyTo:
- install
- upgrade
mixins:
- terraform:
clientVersion: 1.3.6
install:
- terraform:
description: "Deploy Databricks Service"
vars:
tre_resource_id: ${ bundle.parameters.id }
tre_id: ${ bundle.parameters.tre_id }
workspace_id: ${ bundle.parameters.workspace_id }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
outputs:
- name: databricks_workspace_name
- name: connection_uri
- name: databricks_storage_account_name
- name: dbfs_blob_storage_domain
- name: metastore_addresses
- name: event_hub_endpoint_addresses
- name: log_blob_storage_domains
- name: artifact_blob_storage_domains
- name: workspace_address_spaces
- name: databricks_address_prefixes
upgrade:
- terraform:
description: "Upgrade Databricks Service"
vars:
tre_resource_id: ${ bundle.parameters.id }
tre_id: ${ bundle.parameters.tre_id }
workspace_id: ${ bundle.parameters.workspace_id }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
outputs:
- name: databricks_workspace_name
- name: connection_uri
- name: databricks_storage_account_name
- name: dbfs_blob_storage_domain
- name: metastore_addresses
- name: event_hub_endpoint_addresses
- name: log_blob_storage_domains
- name: artifact_blob_storage_domains
- name: workspace_address_spaces
- name: databricks_address_prefixes
uninstall:
- terraform:
description: "Uninstall Azure Databricks Service"
vars:
tre_resource_id: ${ bundle.parameters.id }
tre_id: ${ bundle.parameters.tre_id }
workspace_id: ${ bundle.parameters.workspace_id }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
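The vars passed by the Terraform mixin above must correspond to variable declarations in the bundle's Terraform. A minimal sketch of those declarations, inferred from the var names; the bundle's actual variables.tf is not part of this excerpt:
variable "tre_id" {
  type = string
}
variable "tre_resource_id" {
  type = string
}
variable "workspace_id" {
  type = string
}
variable "address_space" {
  type = string
}
variable "is_exposed_externally" {
  type = bool
}
variable "arm_environment" {
  type = string
}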
|
AzureTRE/templates/workspace_services/databricks/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/databricks/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 2182
}
| 122 |
ARG GITEA_TAG=1.17.3
ARG CERTIFICATE_URL=https://www.digicert.com/CACerts/BaltimoreCyberTrustRoot.crt.pem
FROM gitea/gitea:${GITEA_TAG}
# need to pass args to stage
ARG CERTIFICATE_URL
RUN wget -q -O /usr/local/share/ca-certificates/mysql.crt.pem ${CERTIFICATE_URL} && update-ca-certificates
COPY . /
RUN /usr/sbin/adduser -D -g users gitea
ENTRYPOINT ["/bin/bash", "-c", "./configure_gitea.sh & /usr/bin/entrypoint"]
|
AzureTRE/templates/workspace_services/gitea/docker/Dockerfile/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/docker/Dockerfile",
"repo_id": "AzureTRE",
"token_count": 193
}
| 123 |
variable "workspace_id" {
type = string
}
variable "tre_id" {
type = string
}
variable "id" {
type = string
}
variable "mgmt_resource_group_name" {
type = string
}
variable "mgmt_acr_name" {
type = string
}
variable "aad_authority_url" {
type = string
}
variable "gitea_storage_limit" {
type = number
description = "Space allocated in GB for the Gitea data in Azure Files Share"
default = 100
}
variable "arm_environment" {
type = string
}
variable "sql_sku" {
type = string
}
|
AzureTRE/templates/workspace_services/gitea/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 188
}
| 124 |
#!/usr/bin/env sh
echo >&2 "tomcat exited. code=${1}"
# terminate other services to exit from the container
exec s6-svscanctl -t /var/run/s6/services
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/tomcat/finish/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/tomcat/finish",
"repo_id": "AzureTRE",
"token_count": 53
}
| 125 |
{
"guacamoleVersion" : "1.4.0",
"name" : "Azure Trusted Research Environment Integration",
"namespace" : "guac-azuretre",
"authProviders" : [
"org.apache.guacamole.auth.azuretre.AzureTREAuthenticationProvider"
]
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/resources/guac-manifest.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/resources/guac-manifest.json",
"repo_id": "AzureTRE",
"token_count": 110
}
| 126 |
locals {
short_service_id = substr(var.tre_resource_id, -4, -1)
short_workspace_id = substr(var.workspace_id, -4, -1)
workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}"
webapp_name = "guacamole-${local.service_resource_name_suffix}"
core_resource_group_name = "rg-${var.tre_id}"
aad_tenant_id = data.azurerm_key_vault_secret.aad_tenant_id.value
issuer = "${var.aad_authority_url}/${local.aad_tenant_id}/v2.0"
jwks_endpoint = "${var.aad_authority_url}/${local.aad_tenant_id}/discovery/v2.0/keys"
webapp_suffix = module.terraform_azurerm_environment_configuration.web_app_suffix
api_url = "https://api-${var.tre_id}.${local.webapp_suffix}"
keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
image_tag_from_file = replace(replace(replace(data.local_file.version.content, "__version__ = \"", ""), "\"", ""), "\n", "")
image_tag = var.image_tag == "" ? local.image_tag_from_file : var.image_tag
identity_name = "id-${local.webapp_name}"
workspace_service_tags = {
tre_id = var.tre_id
tre_workspace_id = var.workspace_id
tre_workspace_service_id = var.tre_resource_id
}
guacamole_diagnostic_categories_enabled = [
"AppServiceHTTPLogs", "AppServiceConsoleLogs", "AppServiceAppLogs", "AppServiceFileAuditLogs",
"AppServiceAuditLogs", "AppServiceIPSecAuditLogs", "AppServicePlatformLogs", "AppServiceAntivirusScanAuditLogs"
]
}
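For clarity, Terraform's substr with a negative offset counts back from the end of the string, and a length of -1 means "to the end", so short_workspace_id and short_service_id are the last four characters of their IDs. A small worked example with hypothetical values:
locals {
  example_tre_id       = "mytre"                                # hypothetical
  example_workspace_id = "2f281e3c-3c39-4f0a-8f67-0f0f9d1a1234" # hypothetical
  # substr(..., -4, -1) keeps the last four characters: "1234"
  example_suffix = "${local.example_tre_id}-ws-${substr(local.example_workspace_id, -4, -1)}"
  # => "mytre-ws-1234"
}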
|
AzureTRE/templates/workspace_services/guacamole/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 869
}
| 127 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/template_schema.json",
"type": "object",
"title": "Import review Virtual Machine",
"description": "Windows virtual machine for import review",
"required": [
],
"authorizedRoles": [
"AirlockManager"
],
"properties": {
"os_image": {
"$id": "#/properties/os_image",
"type": "string",
"title": "Windows image",
"description": "Select Windows image to use for VM",
"enum": [
"Server 2019 Data Science VM"
]
},
"vm_size": {
"$id": "#/properties/vm_size",
"type": "string",
"title": "VM Size",
"description": "Select size of VM",
"enum": [
"2 CPU | 8GB RAM"
],
"updateable": true
},
"airlock_request_sas_url": {
"$id": "#/properties/airlock_request_sas_url",
"type": "string",
"title": "Airlock request SAS Token",
"description": "SAS Token for airlock request",
"updateable": false,
"sensitive": true
}
}
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 568
}
| 128 |
---
schemaVersion: 1.0.0
name: tre-service-guacamole-linuxvm
version: 0.6.9
description: "An Azure TRE User Resource Template for Guacamole (Linux)"
dockerfile: Dockerfile.tmpl
registry: azuretre
custom:
# For information on vm_sizes and image_options, see the README.md in the guacamole/user-resources folder
vm_sizes:
"2 CPU | 8GB RAM": Standard_D2s_v5
"4 CPU | 16GB RAM": Standard_D4s_v5
"8 CPU | 32GB RAM": Standard_D8s_v5
"16 CPU | 64GB RAM": Standard_D16s_v5
image_options:
"Ubuntu 18.04":
source_image_reference:
publisher: canonical
offer: ubuntuserver
sku: 18_04-lts-gen2
version: latest
install_ui: true
conda_config: false
"Ubuntu 18.04 Data Science VM":
source_image_reference:
publisher: microsoft-dsvm
offer: ubuntu-1804
sku: 1804-gen2
version: latest
install_ui: false
conda_config: true
# For information on using custom images, see the README.md in the guacamole/user-resources folder
# "Custom Image From Gallery":
# source_image_name: your-image
# install_ui: true
# conda_config: true
credentials:
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: azure_environment
type: string
default: "AzureCloud"
description: "Used by Azure CLI to set the Azure environment"
- name: parent_service_id
type: string
description: "Resource group containing the shared ACR"
env: PARENT_SERVICE_ID
- name: image_gallery_id
type: string
description: Azure resource ID for the compute image gallery to pull images from (if specifying custom images by name)
default: ""
# the following are added automatically by the resource processor
- name: id
type: string
description: "An Id for this installation"
env: id
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
type: string
default: "public"
- name: os_image
type: string
default: "Ubuntu 18.04 Data Science VM"
- name: vm_size
type: string
default: "2 CPU | 8GB RAM"
- name: shared_storage_access
type: boolean
default: true
- name: shared_storage_name
type: string
default: "vm-shared-storage"
outputs:
- name: ip
type: string
applyTo:
- install
- upgrade
- name: hostname
type: string
applyTo:
- install
- upgrade
- name: connection_uri
type: string
applyTo:
- install
- upgrade
- name: azure_resource_id
type: string
applyTo:
- install
- start
- stop
- reset_password
mixins:
- exec
- terraform:
clientVersion: 1.3.6
- az:
clientVersion: 2.37.0
install:
- terraform:
description: "Deploy Guacamole User Resource Service (Linux VM)"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: ip
- name: hostname
- name: connection_uri
- name: azure_resource_id
upgrade:
- terraform:
description: "Update Guacamole User Resource Service (Linux VM)"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: ip
- name: hostname
- name: connection_uri
- name: azure_resource_id
uninstall:
- exec:
description: "Delete the Extensions from the Terraform state manually"
command: ./delete_vm_extensions.sh
arguments:
- ${ bundle.parameters.tfstate_resource_group_name }
- ${ bundle.parameters.tfstate_storage_account_name }
- ${ bundle.parameters.tfstate_container_name }
- ${ bundle.parameters.id }
- terraform:
description: "Delete the Guacamole User Resource Service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
start:
- terraform:
arguments:
- "output"
description: "Get resource ID from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- az:
description: "Start the VM"
arguments:
- vm
- start
flags:
ids: ${ bundle.outputs.azure_resource_id }
stop:
- terraform:
arguments:
- "output"
description: "Get VM resource_id from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- az:
description: "Stop the VM"
arguments:
- vm
- deallocate
flags:
ids: ${ bundle.outputs.azure_resource_id }
reset_password:
- terraform:
arguments:
- "output"
description: "Get VM details from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- name: vm_username
- name: vm_password_secret_name
- name: keyvault_name
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- exec:
description: "Reset password and persist to keyvault"
suppress-output: true
command: ./reset_password.sh
arguments:
- ${ bundle.outputs.vm_password_secret_name }
- ${ bundle.outputs.keyvault_name }
- ${ bundle.outputs.vm_username }
- ${ bundle.outputs.azure_resource_id }
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 4068
}
| 129 |
export TF_LOG=""
terraform init -input=false -backend=true -reconfigure -upgrade \
-backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \
-backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \
-backend-config="container_name=$TF_VAR_terraform_state_container_name" \
-backend-config="key=tre-service-mlflow-$TF_VAR_ID"
terraform plan -out tfplan
terraform apply tfplan
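For reference, the -backend-config flags passed to terraform init above are equivalent to hard-coding the azurerm backend block as below; the real values come from the TF_VAR_* environment variables, and the literals here are placeholders only:
terraform {
  backend "azurerm" {
    resource_group_name  = "rg-mgmt-example"         # $TF_VAR_mgmt_resource_group_name
    storage_account_name = "stmgmtexample"           # $TF_VAR_mgmt_storage_account_name
    container_name       = "tfstate"                 # $TF_VAR_terraform_state_container_name
    key                  = "tre-service-mlflow-1234" # tre-service-mlflow-$TF_VAR_ID
  }
}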
|
AzureTRE/templates/workspace_services/mlflow/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 170
}
| 130 |
# Azure Provider source and version being used
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "3.18.0"
}
random = {
source = "hashicorp/random"
version = "=3.4.2"
}
}
backend "azurerm" {}
}
provider "azurerm" {
features {
key_vault {
# Don't purge on destroy (this would fail due to purge protection being enabled on keyvault)
purge_soft_delete_on_destroy = false
purge_soft_deleted_secrets_on_destroy = false
purge_soft_deleted_certificates_on_destroy = false
purge_soft_deleted_keys_on_destroy = false
# When recreating an environment, recover any previously soft deleted secrets - set to true by default
recover_soft_deleted_key_vaults = true
recover_soft_deleted_secrets = true
recover_soft_deleted_certificates = true
recover_soft_deleted_keys = true
}
}
}
module "terraform_azurerm_environment_configuration" {
source = "git::https://github.com/microsoft/terraform-azurerm-environment-configuration.git?ref=0.2.0"
arm_environment = var.arm_environment
}
data "azurerm_resource_group" "ws" {
name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_virtual_network" "ws" {
name = "vnet-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_key_vault" "ws" {
name = local.keyvault_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_subnet" "services" {
name = "ServicesSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_private_dns_zone" "mysql" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.mysql.database.azure.com"]
resource_group_name = local.core_resource_group_name
}
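This file relies on locals (short_workspace_id, keyvault_name, core_resource_group_name) declared in the bundle's locals.tf, which is not included in this excerpt. A minimal sketch of those locals, assuming the same naming pattern as the Guacamole locals.tf shown earlier in this set:
locals {
  short_workspace_id             = substr(var.workspace_id, -4, -1)
  workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
  core_resource_group_name       = "rg-${var.tre_id}"
  keyvault_name                  = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
}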
|
AzureTRE/templates/workspace_services/mysql/terraform/main.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mysql/terraform/main.tf",
"repo_id": "AzureTRE",
"token_count": 859
}
| 131 |
create schema IF NOT EXISTS webapi_security;
DROP TABLE IF EXISTS webapi_security.security;
CREATE TABLE webapi_security.security
(
email character varying(255),
password character varying(255)
);
GRANT USAGE ON SCHEMA webapi_security TO PUBLIC;
GRANT ALL ON SCHEMA webapi_security TO GROUP ohdsi_admin;
do $$
declare tables_count integer := 0;
declare roles_count integer := 0;
begin
while tables_count <> 3 loop
raise notice 'Waiting for application security tables to become ready...';
PERFORM pg_sleep(10);
tables_count := (
SELECT COUNT(*)
FROM pg_tables
WHERE schemaname = 'webapi'
AND tablename in ('sec_user', 'sec_role', 'sec_user_role')
);
end loop;
raise notice 'All tables are ready.';
while roles_count <> 3 loop
raise notice 'Waiting for application security roles to become ready...';
PERFORM pg_sleep(10);
roles_count := (
SELECT COUNT(*)
FROM webapi.sec_role
WHERE id in (1, 2, 10)
);
end loop;
raise notice 'All roles are ready.';
raise notice 'Done.';
end$$;
|
AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_security.sql/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_security.sql",
"repo_id": "AzureTRE",
"token_count": 404
}
| 132 |
resource "azurerm_key_vault_secret" "jdbc_connection_string_webapi_admin" {
name = "jdbc-connectionstring-${local.short_service_id}"
key_vault_id = data.azurerm_key_vault.ws.id
value = "jdbc:postgresql://${azurerm_postgresql_flexible_server.postgres.fqdn}:5432/${local.postgres_webapi_database_name}?user=${local.postgres_webapi_admin_username}&password=${azurerm_key_vault_secret.postgres_webapi_admin_password.value}&sslmode=require"
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_user_assigned_identity" "ohdsi_webapi_id" {
name = "id-ohdsi-webapi-${local.service_suffix}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_access_policy" "ohdsi_webapi" {
key_vault_id = data.azurerm_key_vault.ws.id
tenant_id = azurerm_user_assigned_identity.ohdsi_webapi_id.tenant_id
object_id = azurerm_user_assigned_identity.ohdsi_webapi_id.principal_id
secret_permissions = [
"Get", "List"
]
}
resource "azurerm_linux_web_app" "ohdsi_webapi" {
name = local.ohdsi_webapi_name
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
virtual_network_subnet_id = data.azurerm_subnet.web_app.id
service_plan_id = data.azurerm_service_plan.workspace.id
https_only = true
client_affinity_enabled = false
site_config {
always_on = true
ftps_state = "Disabled"
application_stack {
docker_image = "index.docker.io/${local.ohdsi_api_docker_image_name}"
docker_image_tag = local.ohdsi_api_docker_image_tag
}
}
key_vault_reference_identity_id = azurerm_user_assigned_identity.ohdsi_webapi_id.id
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.ohdsi_webapi_id.id]
}
app_settings = {
"DATASOURCE_DRIVERCLASSNAME" = "org.postgresql.Driver"
"DATASOURCE_OHDSI_SCHEMA" = local.postgres_schema_name
"DATASOURCE_USERNAME" = local.postgres_webapi_app_username
"DATASOURCE_PASSWORD" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.postgres_webapi_app_password.name})"
"DATASOURCE_URL" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.jdbc_connection_string_webapi_admin.name})"
"FLYWAY_BASELINEDESCRIPTION" = "Base Migration"
"FLYWAY_BASELINEONMIGRATE" = "true"
"flyway_baselineVersionAsString" = local.ohdsi_api_flyway_baseline_version
"FLYWAY_DATASOURCE_DRIVERCLASSNAME" = "org.postgresql.Driver"
"FLYWAY_DATASOURCE_USERNAME" = local.postgres_webapi_admin_username
"FLYWAY_DATASOURCE_PASSWORD" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.postgres_webapi_admin_password.name})"
"FLYWAY_DATASOURCE_URL" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.jdbc_connection_string_webapi_admin.name})"
"FLYWAY_LOCATIONS" = "classpath:db/migration/postgresql"
"FLYWAY_PLACEHOLDERS_OHDSISCHEMA" = local.postgres_schema_name
"FLYWAY_SCHEMAS" = local.postgres_schema_name
"FLYWAY_TABLE" = "schema_history"
"MANAGED_IDENTITY_CLIENT_ID" = azurerm_user_assigned_identity.ohdsi_webapi_id.id
"SECURITY_SSL_ENABLED" = "false"
"SECURITY_CORS_ENABLED" = "true"
"SECURITY_DB_DATASOURCE_AUTHENTICATIONQUERY" = "select password from webapi_security.security where email = ?"
"SECURITY_DB_DATASOURCE_PASSWORD" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.postgres_webapi_admin_password.name})"
"SECURITY_DB_DATASOURCE_SCHEMA" = "webapi_security"
"SECURITY_DB_DATASOURCE_URL" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${azurerm_key_vault_secret.jdbc_connection_string_webapi_admin.name})"
"SECURITY_DB_DATASOURCE_USERNAME" = local.postgres_webapi_admin_username
"SECURITY_DURATION_INCREMENT" = "10"
"SECURITY_DURATION_INITIAL" = "10"
"SECURITY_MAXLOGINATTEMPTS" = "3"
"SECURITY_ORIGIN" = "*"
"SECURITY_PROVIDER" = "AtlasRegularSecurity"
"SPRING_BATCH_REPOSITORY_TABLEPREFIX" = "webapi.BATCH_"
"SPRING_JPA_PROPERTIES_HIBERNATE_DEFAULT_SCHEMA" = local.postgres_schema_name
"SPRING_JPA_PROPERTIES_HIBERNATE_DIALECT" = "org.hibernate.dialect.PostgreSQLDialect"
"WEBSITES_CONTAINER_START_TIME_LIMIT" = "1800"
"WEBSITES_ENABLE_APP_SERVICE_STORAGE" = false
"WEBSITES_PORT" = "8080"
"security.oid.clientId" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${data.azurerm_key_vault_secret.workspace_client_id.name})"
"security.oid.apiSecret" = "@Microsoft.KeyVault(VaultName=${data.azurerm_key_vault.ws.name};SecretName=${data.azurerm_key_vault_secret.workspace_client_secret.name})"
"security.oid.url" = "${module.terraform_azurerm_environment_configuration.active_directory_endpoint}/${data.azurerm_key_vault_secret.aad_tenant_id.value}/v2.0/.well-known/openid-configuration"
"security.oauth.callback.api" = local.ohdsi_webapi_url_auth_callback
"security.oauth.callback.ui" = local.atlas_ui_url_welcome
"security.oid.redirectUrl" = local.atlas_ui_url_welcome
"security.oid.logoutUrl" = local.atlas_ui_url_welcome
}
logs {
application_logs {
file_system_level = "Information"
}
http_logs {
file_system {
retention_in_days = 7
retention_in_mb = 100
}
}
}
tags = local.tre_workspace_service_tags
depends_on = [
terraform_data.deployment_ohdsi_webapi_init
]
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_endpoint" "webapi_private_endpoint" {
name = "pe-${azurerm_linux_web_app.ohdsi_webapi.name}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
subnet_id = data.azurerm_subnet.services.id
tags = local.tre_workspace_service_tags
private_service_connection {
private_connection_resource_id = azurerm_linux_web_app.ohdsi_webapi.id
name = "psc-${azurerm_linux_web_app.ohdsi_webapi.name}"
subresource_names = ["sites"]
is_manual_connection = false
}
private_dns_zone_group {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
private_dns_zone_ids = [data.azurerm_private_dns_zone.azurewebsites.id]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_monitor_diagnostic_setting" "ohdsi_webapi" {
name = azurerm_linux_web_app.ohdsi_webapi.name
target_resource_id = azurerm_linux_web_app.ohdsi_webapi.id
log_analytics_workspace_id = data.azurerm_log_analytics_workspace.workspace.id
dynamic "enabled_log" {
for_each = local.ohdsi_api_log_analytics_categories
content {
category = enabled_log.value
}
}
metric {
category = "AllMetrics"
enabled = true
}
}
|
AzureTRE/templates/workspace_services/ohdsi/terraform/ohdsi_web_api.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/ohdsi_web_api.tf",
"repo_id": "AzureTRE",
"token_count": 4147
}
| 133 |
resource "azurerm_log_analytics_workspace" "workspace" {
name = "log-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = var.resource_group_name
location = var.location
retention_in_days = 30
sku = "PerGB2018"
tags = var.tre_workspace_tags
internet_ingestion_enabled = var.enable_local_debugging ? true : false
lifecycle { ignore_changes = [tags] }
}
# Storage account for Application Insights
# Because Private Link is enabled on Application Performance Management (APM), a Bring Your Own Storage (BYOS) approach is required
resource "azurerm_storage_account" "app_insights" {
name = lower(replace("stai${var.tre_id}ws${local.short_workspace_id}", "-", ""))
resource_group_name = var.resource_group_name
location = var.location
account_kind = "StorageV2"
account_tier = "Standard"
account_replication_type = "LRS"
allow_nested_items_to_be_public = false
tags = var.tre_workspace_tags
network_rules {
default_action = "Deny"
bypass = ["AzureServices"]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_log_analytics_linked_storage_account" "workspace_storage_ingestion" {
data_source_type = "Ingestion"
resource_group_name = var.resource_group_name
workspace_resource_id = azurerm_log_analytics_workspace.workspace.id
storage_account_ids = [azurerm_storage_account.app_insights.id]
}
resource "azurerm_log_analytics_linked_storage_account" "workspace_storage_customlogs" {
data_source_type = "CustomLogs"
resource_group_name = var.resource_group_name
workspace_resource_id = azurerm_log_analytics_workspace.workspace.id
storage_account_ids = [azurerm_storage_account.app_insights.id]
}
# TODO: Switch to azurerm once the following issue is resolved: https://github.com/microsoft/AzureTRE/issues/3625
# resource "azurerm_monitor_private_link_scope" "workspace" {
# name = "ampls-${var.tre_id}-ws-${local.short_workspace_id}"
# resource_group_name = var.resource_group_name
# tags = var.tre_workspace_tags
# lifecycle { ignore_changes = [tags] }
# }
resource "azapi_resource" "ampls_workspace" {
type = "microsoft.insights/privateLinkScopes@2021-07-01-preview"
name = "ampls-${var.tre_id}-ws-${local.short_workspace_id}"
parent_id = var.resource_group_id
location = "global"
tags = var.tre_workspace_tags
body = jsonencode({
properties = {
accessModeSettings = {
ingestionAccessMode = "PrivateOnly"
queryAccessMode = "PrivateOnly"
}
}
})
response_export_values = [
"id"
]
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_monitor_private_link_scoped_service" "ampls_log_anaytics" {
name = "ampls-log-anaytics-service"
resource_group_name = var.resource_group_name
scope_name = azapi_resource.ampls_workspace.name
linked_resource_id = azurerm_log_analytics_workspace.workspace.id
}
# Application Insights
# TODO: switch from the azapi implementation to azurerm when resolved https://github.com/microsoft/AzureTRE/issues/3200
# resource "azurerm_application_insights" "workspace" {
# name = local.app_insights_name
# location = var.location
# resource_group_name = var.resource_group_name
# workspace_id = azurerm_log_analytics_workspace.workspace.id
# application_type = "web"
# internet_ingestion_enabled = var.enable_local_debugging ? true : false
# force_customer_storage_for_profiler = true
# tags = var.tre_workspace_tags
# lifecycle { ignore_changes = [tags] }
# }
resource "azapi_resource" "appinsights" {
type = "Microsoft.Insights/components@2020-02-02"
name = local.app_insights_name
parent_id = var.resource_group_id
location = var.location
tags = var.tre_workspace_tags
body = jsonencode({
kind = "web"
properties = {
Application_Type = "web"
Flow_Type = "Bluefield"
Request_Source = "rest"
IngestionMode = "LogAnalytics"
WorkspaceResourceId = azurerm_log_analytics_workspace.workspace.id
ForceCustomerStorageForProfiler = true
publicNetworkAccessForIngestion = var.enable_local_debugging ? "Enabled" : "Disabled"
}
})
response_export_values = [
"id",
"properties.ConnectionString",
]
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_monitor_private_link_scoped_service" "ampls_app_insights" {
name = "ampls-app-insights-service"
resource_group_name = var.resource_group_name
scope_name = azapi_resource.ampls_workspace.name
# linked_resource_id = azurerm_application_insights.workspace.id
linked_resource_id = jsondecode(azapi_resource.appinsights.output).id
}
resource "azurerm_private_endpoint" "azure_monitor_private_endpoint" {
name = "pe-ampls-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = var.resource_group_name
location = var.location
subnet_id = var.workspace_subnet_id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
private_service_connection {
private_connection_resource_id = jsondecode(azapi_resource.ampls_workspace.output).id
name = "psc-ampls-${var.tre_id}-ws-${local.short_workspace_id}"
subresource_names = ["azuremonitor"]
is_manual_connection = false
}
private_dns_zone_group {
name = "azure-monitor-private-dns-zone-group"
private_dns_zone_ids = [
var.azure_monitor_dns_zone_id,
var.azure_monitor_oms_opinsights_dns_zone_id,
var.azure_monitor_ods_opinsights_dns_zone_id,
var.azure_monitor_agentsvc_dns_zone_id,
var.blob_core_dns_zone_id,
]
}
depends_on = [
azurerm_monitor_private_link_scoped_service.ampls_app_insights,
]
}
# We don't really need this, but if not present the RG will not be empty and won't be destroyed
# TODO: remove when this is resolved: https://github.com/hashicorp/terraform-provider-azurerm/issues/18026
resource "azurerm_monitor_action_group" "failure_anomalies" {
name = "${local.app_insights_name}-failure-anomalies-action-group"
resource_group_name = var.resource_group_name
short_name = "Failures"
tags = var.tre_workspace_tags
depends_on = [
# azurerm_application_insights.workspace
azapi_resource.appinsights
]
lifecycle { ignore_changes = [tags] }
}
# We don't really need this, but if not present the RG will not be empty and won't be destroyed
# TODO: remove when this is resolved: https://github.com/hashicorp/terraform-provider-azurerm/issues/18026
resource "azurerm_monitor_smart_detector_alert_rule" "failure_anomalies" {
name = "Failure Anomalies - ${local.app_insights_name}"
resource_group_name = var.resource_group_name
severity = "Sev3"
scope_resource_ids = [
# azurerm_application_insights.workspace.id
jsondecode(azapi_resource.appinsights.output).id
]
frequency = "PT1M"
detector_type = "FailureAnomaliesDetector"
tags = var.tre_workspace_tags
action_group {
ids = [azurerm_monitor_action_group.failure_anomalies.id]
}
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspaces/base/terraform/azure-monitor/azure-monitor.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/azure-monitor/azure-monitor.tf",
"repo_id": "AzureTRE",
"token_count": 3342
}
| 134 |
variable "location" {
type = string
}
variable "tre_id" {
type = string
}
variable "address_spaces" {
type = string
}
variable "ws_resource_group_name" {
type = string
}
variable "tre_workspace_tags" {
type = map(string)
}
variable "tre_resource_id" {
type = string
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/workspaces/base/terraform/network/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/network/variables.tf",
"repo_id": "AzureTRE",
"token_count": 116
}
| 135 |
# TRE UI
Please see the docs for a full overview and deployment instructions.
The UI was built using Create React App and Microsoft Fluent UI. Further details are in ./app/README.
## Run the UI
- Ensure `deploy_ui=false` is not set in your `./config.yaml` file.
- In the root of the repo, run `make tre-deploy`. This provisions the necessary resources in Azure and builds and deploys the UI to Azure Blob Storage, served behind the App Gateway used for the API. The deployment process also creates the necessary `config.json`, using `config.source.json` as a template.
- In Microsoft Entra ID, locate the TRE Client Apps app (possibly called Swagger App). In the Authentication section, add reply URIs for:
  - `http://localhost:3000` (if you want to run locally)
  - Your deployed app URL: `https://{TRE_ID}.{LOCATION}.cloudapp.azure.com`
At this point you should be able to navigate to the web app in Azure, log in, and see your workspaces.
### To run locally
- `cd ./ui/app`
- `yarn start`
After making changes to the code, redeploy to Azure by running `make build-and-deploy-ui` in the root of the dev container.
|
AzureTRE/ui/README.md/0
|
{
"file_path": "AzureTRE/ui/README.md",
"repo_id": "AzureTRE",
"token_count": 308
}
| 136 |
import { IconButton, Spinner, Stack, TooltipHost } from "@fluentui/react";
import React, { useState } from "react";
import { Text } from '@fluentui/react/lib/Text';
interface CliCommandProps {
command: string,
title: string,
isLoading: boolean
}
export const CliCommand: React.FunctionComponent<CliCommandProps> = (props: CliCommandProps) => {
const COPY_TOOL_TIP_DEFAULT_MESSAGE = "Copy to clipboard"
const [copyToolTipMessage, setCopyToolTipMessage] = useState<string>(COPY_TOOL_TIP_DEFAULT_MESSAGE);
const handleCopyCommand = () => {
navigator.clipboard.writeText(props.command);
setCopyToolTipMessage("Copied")
setTimeout(() => setCopyToolTipMessage(COPY_TOOL_TIP_DEFAULT_MESSAGE), 3000);
}
const renderCommand = () => {
// regex to match only the command part (without the parameters)
const commandMatches = props.command.match(/^((?! -).)*/);
if (!commandMatches) {
return
}
const commandWithoutParams = commandMatches[0]
const paramsOnly = props.command.replace(commandWithoutParams, '')
// regex to match all the parameters, along with their assigned values
const paramsList = paramsOnly.match(/(?<= )-{1,2}[\w-]+(?:(?!( -){1,2}).)*/g)
return <Stack styles={{ root: { padding: "15px", backgroundColor: "#f2f2f2", border: '1px solid #e6e6e6' } }}>
<code style={{ color: "blue", fontSize: "13px" }}>
{commandWithoutParams}
</code>
<Stack.Item style={{ paddingLeft: "30px" }}>
{paramsList?.map((paramWithValue, index) => {
// split the parameter from its value
const splitParam = paramWithValue.split(/\s(.*)/)
const param = splitParam[0];
const paramValue = ` ${splitParam[1] || ''}`;
const paramValueIsComment = paramValue?.match(/<.*?>/);
return (
<div key={index} style={{ wordBreak: "break-all", fontSize: "13px" }}>
<code style={{ color: "teal" }}>{param}</code>
<code style={{ color: paramValueIsComment ? "firebrick" : "black" }}>{paramValue}</code>
</div>
);
})}
</Stack.Item>
</Stack >
}
return (
<Stack>
<Stack horizontal style={{ backgroundColor: "#e6e6e6", alignItems: 'center' }}>
<Stack.Item grow style={{ paddingLeft: "10px", height: "100%" }}>
<Text >{props.title}</Text>
</Stack.Item>
<Stack.Item align="end">
<TooltipHost content={copyToolTipMessage}>
<IconButton
iconProps={{ iconName: 'copy' }}
styles={{ root: { minWidth: '40px' } }}
onClick={() => { props.command && handleCopyCommand() }} />
</TooltipHost>
</Stack.Item>
</Stack>
{(!props.isLoading) ? renderCommand() :
<Spinner label="Generating command..." style={{ padding: "15px", backgroundColor: "#f2f2f2", border: '1px solid #e6e6e6' }} />}
</Stack>
);
}
|
AzureTRE/ui/app/src/components/shared/CliCommand.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/CliCommand.tsx",
"repo_id": "AzureTRE",
"token_count": 1233
}
| 137 |
import React from 'react';
import { ProgressIndicator, Stack } from '@fluentui/react';
import { ResourceContextMenu } from '../shared/ResourceContextMenu';
import { ComponentAction, Resource, ResourceUpdate } from '../../models/resource';
import { StatusBadge } from './StatusBadge';
import { PowerStateBadge } from './PowerStateBadge';
interface ResourceHeaderProps {
resource: Resource,
latestUpdate: ResourceUpdate,
readonly?: boolean
}
export const ResourceHeader: React.FunctionComponent<ResourceHeaderProps> = (props: ResourceHeaderProps) => {
return (
<>
{props.resource && props.resource.id &&
<div className="tre-panel">
<Stack>
<Stack.Item style={!props.readonly ? { borderBottom: '1px #999 solid' } : {}}>
<Stack horizontal>
<Stack.Item grow={1}>
<div style={{ display: 'flex', alignItems: 'center' }}>
<h1 style={{ marginLeft: 5, marginTop: 5, marginRight: 15, marginBottom: 10 }}>
{props.resource.properties?.display_name}
</h1>
{
(props.resource.azureStatus?.powerState) &&
<PowerStateBadge state={props.resource.azureStatus.powerState} />
}
</div>
</Stack.Item>
{
(props.latestUpdate.operation || props.resource.deploymentStatus) &&
<Stack.Item align="center">
<StatusBadge
resource={props.resource}
status={
props.latestUpdate.operation?.status
? props.latestUpdate.operation.status
: props.resource.deploymentStatus
}
/>
</Stack.Item>
}
</Stack>
</Stack.Item>
{
!props.readonly &&
<Stack.Item>
<ResourceContextMenu
resource={props.resource}
commandBar={true}
componentAction={props.latestUpdate.componentAction}
/>
</Stack.Item>
}
{
props.latestUpdate.componentAction === ComponentAction.Lock &&
<Stack.Item>
<ProgressIndicator description="Resource locked while it updates" />
</Stack.Item>
}
</Stack>
</div>
}
</>
);
};
|
AzureTRE/ui/app/src/components/shared/ResourceHeader.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceHeader.tsx",
"repo_id": "AzureTRE",
"token_count": 1336
}
| 138 |
import { DefaultButton, DialogFooter, FontWeights, getTheme, IButtonStyles, IconButton, IIconProps, IStackItemStyles, IStackStyles, mergeStyleSets, MessageBar, MessageBarType, PrimaryButton, Shimmer, Spinner, SpinnerSize, Stack, TextField } from "@fluentui/react";
import { useCallback, useContext, useEffect, useState } from "react";
import { WorkspaceContext } from "../../../contexts/WorkspaceContext";
import { HttpMethod, useAuthApiCall } from "../../../hooks/useAuthApiCall";
import { AirlockRequest } from "../../../models/airlock";
import { ApiEndpoint } from "../../../models/apiEndpoints";
import { APIError } from "../../../models/exceptions";
import { destructiveButtonStyles, successButtonStyles } from "../../../styles";
import { ExceptionLayout } from "../ExceptionLayout";
import { UserResource } from '../../../models/userResource';
import { PowerStateBadge } from "../PowerStateBadge";
import { useComponentManager } from "../../../hooks/useComponentManager";
import { ComponentAction, Resource, VMPowerStates } from "../../../models/resource";
import { actionsDisabledStates, failedStates, inProgressStates, successStates } from "../../../models/operation";
import { useAppDispatch } from "../../../hooks/customReduxHooks";
import { addUpdateOperation } from "../notifications/operationsSlice";
import { StatusBadge } from "../StatusBadge";
import vmImage from "../../../assets/virtual_machine.svg";
import { useAccount, useMsal } from "@azure/msal-react";
interface AirlockReviewRequestProps {
request: AirlockRequest | undefined,
onUpdateRequest: (request: AirlockRequest) => void,
onReviewRequest: (request: AirlockRequest) => void,
onClose: () => void
}
export const AirlockReviewRequest: React.FunctionComponent<AirlockReviewRequestProps> = (props: AirlockReviewRequestProps) => {
const [request, setRequest] = useState<AirlockRequest>();
const [reviewExplanation, setReviewExplanation] = useState('');
const [reviewing, setReviewing] = useState(false);
const [reviewError, setReviewError] = useState(false);
const [reviewResourcesConfigured, setReviewResourcesConfigured] = useState(false);
const [reviewResourceStatus, setReviewResourceStatus] = useState<'notCreated' | 'creating' | 'created' | 'failed'>();
const [reviewResourceError, setReviewResourceError] = useState(false);
const [apiError, setApiError] = useState({} as APIError);
const [proceedToReview, setProceedToReview] = useState(false);
const [reviewResource, setReviewResource] = useState<UserResource>();
const [reviewWorkspaceScope, setReviewWorkspaceScope] = useState<string>();
const [otherReviewers, setOtherReviewers] = useState<Array<string>>();
const workspaceCtx = useContext(WorkspaceContext);
const apiCall = useAuthApiCall();
const dispatch = useAppDispatch();
const { accounts } = useMsal();
const account = useAccount(accounts[0] || {});
useEffect(() => setRequest(props.request), [props.request]);
// Check if Review Resources are configured for the current workspace
useEffect(() => {
if (
request
&& workspaceCtx.workspace?.properties.airlock_review_config
&& workspaceCtx.workspace?.properties.airlock_review_config[request.type]
) {
setReviewResourcesConfigured(true);
} else {
setReviewResourcesConfigured(false);
}
}, [request, workspaceCtx]);
// Get the review user resource if present in the airlock request
useEffect(() => {
const getReviewUserResource = async (userId: string) => {
setReviewResourceError(false);
try {
// Find the user's resource
const reviewWorkspaceId = request?.reviewUserResources[userId].workspaceId;
const reviewServiceId = request?.reviewUserResources[userId].workspaceServiceId;
const reviewResourceId = request?.reviewUserResources[userId].userResourceId;
// First fetch the scope for the review resource workspace if different to the airlock request workspace
let scopeId;
if (reviewWorkspaceId !== workspaceCtx.workspace.id) {
scopeId = (await apiCall(`${ApiEndpoint.Workspaces}/${reviewWorkspaceId}/scopeid`, HttpMethod.Get)).workspaceAuth.scopeId;
if (!scopeId) {
throw Error("Unable to get scope_id from review resource workspace - authentication not set up.");
}
} else {
scopeId = workspaceCtx.workspaceApplicationIdURI;
}
setReviewWorkspaceScope(scopeId);
// Get the review user resource
const resource = (await apiCall(
`${ApiEndpoint.Workspaces}/${reviewWorkspaceId}/${ApiEndpoint.WorkspaceServices}/${reviewServiceId}/${ApiEndpoint.UserResources}/${reviewResourceId}`,
HttpMethod.Get,
scopeId
)).userResource;
setReviewResource(resource);
} catch (err: any) {
err.userMessage = "Error retrieving resource";
setApiError(err);
setReviewResourceError(true);
}
};
if (reviewResourcesConfigured && account && request) {
const userId = account.localAccountId.split('.')[0];
if (userId in request.reviewUserResources) {
getReviewUserResource(userId);
} else {
setReviewResourceStatus('notCreated');
}
const otherReviewers = Object.keys(request.reviewUserResources).filter(id => id !== userId);
setOtherReviewers(otherReviewers);
}
}, [apiCall, request, workspaceCtx.workspace.id, workspaceCtx.workspaceApplicationIdURI, reviewResourcesConfigured, account]);
// Get the latest updates to the review resource to track deployment
const latestUpdate = useComponentManager(
reviewResource,
(r: Resource) => { setReviewResource(r as UserResource) },
() => { setReviewResource({} as UserResource) },
reviewWorkspaceScope // Pass this so component manager knows it might be different to the workspace context
);
// Set the review resource status
useEffect(() => {
if (reviewResource && latestUpdate) {
if (inProgressStates.includes(latestUpdate.operation?.status) || inProgressStates.includes(reviewResource.deploymentStatus)) {
setReviewResourceStatus('creating');
} else if (failedStates.includes(latestUpdate.operation?.status) || failedStates.includes(reviewResource.deploymentStatus)) {
setReviewResourceStatus('failed');
const err = new Error(latestUpdate.operation?.message) as any;
      err.userMessage = 'An issue occurred while deploying the review resource.';
      setApiError(err);
setReviewResourceError(true);
} else if (successStates.includes(latestUpdate.operation?.status) || successStates.includes(reviewResource.deploymentStatus)) {
setReviewResourceStatus('created');
}
}
}, [latestUpdate, reviewResource, request])
// Create a review resource
const createReviewResource = useCallback(async () => {
setReviewResourceError(false);
setReviewResourceStatus('creating');
try {
const response = await apiCall(
`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.AirlockRequests}/${request?.id}/${ApiEndpoint.AirlockCreateReviewResource}`,
HttpMethod.Post,
workspaceCtx.workspaceApplicationIdURI
);
dispatch(addUpdateOperation(response.operation));
props.onUpdateRequest(response.airlockRequest);
} catch (err: any) {
err.userMessage = "Error creating review resource";
setApiError(err);
setReviewResourceError(true);
setReviewResourceStatus('failed');
}
}, [apiCall, workspaceCtx.workspaceApplicationIdURI, request?.id, workspaceCtx.workspace.id, dispatch, props])
// Review an airlock request
const reviewRequest = useCallback(async (isApproved: boolean) => {
if (request && reviewExplanation) {
setReviewing(true);
setReviewError(false);
try {
const review = {
approval: isApproved,
decisionExplanation: reviewExplanation
};
const response = await apiCall(
`${ApiEndpoint.Workspaces}/${request.workspaceId}/${ApiEndpoint.AirlockRequests}/${request.id}/${ApiEndpoint.AirlockReview}`,
HttpMethod.Post,
workspaceCtx.workspaceApplicationIdURI,
review
);
props.onReviewRequest(response.airlockRequest);
} catch (err: any) {
err.userMessage = 'Error reviewing airlock request';
setApiError(err);
setReviewError(true);
}
setReviewing(false);
}
}, [apiCall, request, workspaceCtx.workspaceApplicationIdURI, reviewExplanation, props]);
let statusBadge = <Shimmer></Shimmer>;
let action = <Spinner style={{marginRight:20}}></Spinner>;
// Get connection property for review userResource
let connectUri = '';
if (reviewResource?.properties && reviewResource.properties.connection_uri) {
connectUri = reviewResource.properties.connection_uri;
}
// Determine whether or not to show connect button or re-deploy
let resourceNotConnectable = true;
if (reviewResource) {
resourceNotConnectable = latestUpdate.componentAction === ComponentAction.Lock
|| actionsDisabledStates.includes(reviewResource.deploymentStatus)
|| !reviewResource.isEnabled
|| (reviewResource.azureStatus?.powerState && reviewResource.azureStatus.powerState !== VMPowerStates.Running)
|| !connectUri;
}
// Determine the relevant actions and status to show
switch (reviewResourceStatus) {
case 'creating':
statusBadge = <StatusBadge
resource={reviewResource}
status={latestUpdate.operation?.status}
/>;
break;
case 'notCreated':
statusBadge = <small>Not created</small>;
action = <PrimaryButton onClick={createReviewResource} text="Create" />;
break;
case 'failed':
statusBadge = <StatusBadge
resource={reviewResource}
status={latestUpdate.operation?.status}
/>;
action = <PrimaryButton onClick={createReviewResource} text="Retry" />;
break;
case 'created':
statusBadge = <PowerStateBadge state={reviewResource?.azureStatus.powerState} />;
if (resourceNotConnectable) {
action = <PrimaryButton
onClick={createReviewResource}
text="Re-deploy"
title="Re-deploy resource"
/>;
} else {
action = <PrimaryButton
onClick={() => window.open(connectUri)}
text="View data"
title="Connect to resource"
/>;
}
break;
}
const currentStep = !proceedToReview ? <>
<p>
To securely review the request's data, you need to create a review VM. Click "Create" and a VM will be created with the data
automatically downloaded onto it. Once you've viewed the data, click "Proceed to review" to make your decision.
</p>
{
reviewResourcesConfigured ? <>
<Stack horizontal horizontalAlign="space-between" styles={reviewVMStyles}>
<Stack.Item styles={reviewVMItemStyles}>
<img src={vmImage} alt="Virtual machine" width="50" />
<div style={{marginLeft:20}}>
<h3 style={{marginTop:0, marginBottom:2}}>Review VM</h3>
{ statusBadge }
</div>
</Stack.Item>
<Stack.Item styles={reviewVMItemStyles}>
{ action }
</Stack.Item>
</Stack>
{
otherReviewers && otherReviewers.length > 0 && <MessageBar messageBarType={MessageBarType.info}>
{
otherReviewers.length === 1
? <><b>1</b> other person is reviewing this request.</>
: <><b>{otherReviewers.length}</b> other people are reviewing this request.</>
}
</MessageBar>
}
{ reviewResourceError && <ExceptionLayout e={apiError} /> }
</> : <>
<MessageBar messageBarType={MessageBarType.severeWarning}>
It looks like review VMs aren't set up in your workspace. Please contact your Workspace Owner.
</MessageBar>
</>
}
<DialogFooter>
<DefaultButton onClick={props.onClose} text="Cancel" />
<PrimaryButton onClick={() => setProceedToReview(true)} text="Proceed to review" />
</DialogFooter>
</> : <>
<TextField
label="Reason for decision"
placeholder="Please provide a brief explanation of your decision."
value={reviewExplanation}
onChange={(e: React.FormEvent, newValue?: string) => setReviewExplanation(newValue || '')}
multiline
rows={6}
required
/>
{
reviewError && <ExceptionLayout e={apiError} />
}
{
reviewing
? <Spinner label="Submitting review..." ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} style={{marginTop:20}} />
: <DialogFooter>
<DefaultButton
onClick={() => setProceedToReview(false)}
text="Back"
styles={{root:{float:'left'}}}
/>
<DefaultButton
iconProps={{iconName: 'Cancel'}}
onClick={() => reviewRequest(false)}
text="Reject"
styles={destructiveButtonStyles}
disabled={reviewExplanation.length <= 0}
/>
<DefaultButton
iconProps={{iconName: 'Accept'}}
onClick={() => reviewRequest(true)}
text="Approve"
styles={successButtonStyles}
disabled={reviewExplanation.length <= 0}
/>
</DialogFooter>
}
</>
return (
<>
<div className={contentStyles.header}>
<span id={`title-${request?.id}`}>Review: {request?.title}</span>
<IconButton
styles={iconButtonStyles}
iconProps={cancelIcon}
ariaLabel="Close popup modal"
onClick={props.onClose}
/>
</div>
<div className={contentStyles.body}>
{ currentStep }
</div>
</>
)
}
const theme = getTheme();
const contentStyles = mergeStyleSets({
header: [
theme.fonts.xLarge,
{
flex: '1 1 auto',
borderTop: `4px solid ${theme.palette.themePrimary}`,
color: theme.palette.neutralPrimary,
display: 'flex',
alignItems: 'center',
fontWeight: FontWeights.semibold,
padding: '12px 12px 14px 24px',
},
],
body: {
flex: '4 4 auto',
padding: '0 24px 24px 24px',
overflowY: 'hidden',
selectors: {
p: { margin: '14px 0' },
'p:first-child': { marginTop: 0 },
'p:last-child': { marginBottom: 0 },
},
width: 600
},
});
const iconButtonStyles: Partial<IButtonStyles> = {
root: {
color: theme.palette.neutralPrimary,
marginLeft: 'auto',
marginTop: '4px',
marginRight: '2px',
},
rootHovered: {
color: theme.palette.neutralDark,
},
};
const cancelIcon: IIconProps = { iconName: 'Cancel' };
const reviewVMStyles: IStackStyles = {
root:{
marginTop: 20,
marginBottom: 20,
padding: 20,
backgroundColor: theme.palette.neutralLighter
}
};
const reviewVMItemStyles: IStackItemStyles = {
root: {
display:'flex',
alignItems:'center'
}
}
|
AzureTRE/ui/app/src/components/shared/airlock/AirlockReviewRequest.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/airlock/AirlockReviewRequest.tsx",
"repo_id": "AzureTRE",
"token_count": 5620
}
| 139 |
import { FontIcon, Spinner, SpinnerSize, Stack, getTheme, mergeStyles } from '@fluentui/react';
import React, { useContext, useEffect, useRef, useState } from 'react';
import { Route, Routes, useParams } from 'react-router-dom';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { WorkspaceService } from '../../models/workspaceService';
import { HttpMethod, ResultType, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { WorkspaceHeader } from './WorkspaceHeader';
import { WorkspaceItem } from './WorkspaceItem';
import { WorkspaceLeftNav } from './WorkspaceLeftNav';
import { WorkspaceServiceItem } from './WorkspaceServiceItem';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { WorkspaceServices } from './WorkspaceServices';
import { Workspace } from '../../models/workspace';
import { SharedService } from '../../models/sharedService';
import { SharedServices } from '../shared/SharedServices';
import { SharedServiceItem } from '../shared/SharedServiceItem';
import { Airlock } from '../shared/airlock/Airlock';
import { APIError } from '../../models/exceptions';
import { LoadingState } from '../../models/loadingState';
import { ExceptionLayout } from '../shared/ExceptionLayout';
import { AppRolesContext } from '../../contexts/AppRolesContext';
import { RoleName, WorkspaceRoleName } from '../../models/roleNames';
export const WorkspaceProvider: React.FunctionComponent = () => {
const apiCall = useAuthApiCall();
const [selectedWorkspaceService, setSelectedWorkspaceService] = useState({} as WorkspaceService);
const [workspaceServices, setWorkspaceServices] = useState([] as Array<WorkspaceService>);
const [sharedServices, setSharedServices] = useState([] as Array<SharedService>);
const workspaceCtx = useRef(useContext(WorkspaceContext));
const [wsRoles, setWSRoles] = useState([] as Array<string>);
const [loadingState, setLoadingState] = useState(LoadingState.Loading);
const [apiError, setApiError] = useState({} as APIError);
const { workspaceId } = useParams();
const [costApiError, setCostApiError] = useState({} as APIError);
const appRoles = useContext(AppRolesContext);
const [isTREAdminUser, setIsTREAdminUser] = useState(false);
// set workspace context from url
useEffect(() => {
const getWorkspace = async () => {
try {
// get the workspace - first we get the scope_id so we can auth against the right aad app
let scopeId = (await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}/scopeid`, HttpMethod.Get)).workspaceAuth.scopeId;
const authProvisioned = scopeId !== "";
let wsRoles: Array<string> = [];
let ws: Workspace = {} as Workspace;
if (authProvisioned) {
// use the client ID to get a token against the workspace (tokenOnly), and set the workspace roles in the context
await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}`, HttpMethod.Get, scopeId,
undefined, ResultType.JSON, (roles: Array<string>) => {
wsRoles = roles;
}, true);
}
if (wsRoles && wsRoles.length > 0) {
ws = (await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}`, HttpMethod.Get, scopeId)).workspace;
workspaceCtx.current.setWorkspace(ws);
workspaceCtx.current.setRoles(wsRoles);
setWSRoles(wsRoles);
// get workspace services to pass to nav + ws services page
const workspaceServices = await apiCall(`${ApiEndpoint.Workspaces}/${ws.id}/${ApiEndpoint.WorkspaceServices}`,
HttpMethod.Get, ws.properties.scope_id);
setWorkspaceServices(workspaceServices.workspaceServices);
// get shared services to pass to nav shared services pages
const sharedServices = await apiCall(ApiEndpoint.SharedServices, HttpMethod.Get);
setSharedServices(sharedServices.sharedServices);
setLoadingState(LoadingState.Ok);
} else if (appRoles.roles.includes(RoleName.TREAdmin)) {
ws = (await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}`, HttpMethod.Get)).workspace;
workspaceCtx.current.setWorkspace(ws);
setLoadingState(LoadingState.Ok);
setIsTREAdminUser(true);
} else {
let e = new APIError();
e.status = 403;
          e.userMessage = "User does not have a role assigned in the workspace, nor the TRE Admin role";
e.endpoint = `${ApiEndpoint.Workspaces}/${workspaceId}`;
throw e;
}
} catch (e: any) {
if (e.status === 401 || e.status === 403) {
setApiError(e);
setLoadingState(LoadingState.AccessDenied);
} else {
e.userMessage = 'Error retrieving workspace';
setApiError(e);
setLoadingState(LoadingState.Error);
}
}
};
getWorkspace();
let ctx = workspaceCtx.current;
// Return a function to clear the context on unmount
return () => {
ctx.setRoles([]);
ctx.setWorkspace({} as Workspace);
};
}, [apiCall, workspaceId, isTREAdminUser, appRoles.roles]);
useEffect(() => {
const getWorkspaceCosts = async () => {
try {
// TODO: amend when costs enabled in API for WorkspaceRoleName.Researcher
if(wsRoles.includes(WorkspaceRoleName.WorkspaceOwner)){
let scopeId = (await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}/scopeid`, HttpMethod.Get)).workspaceAuth.scopeId;
const r = await apiCall(`${ApiEndpoint.Workspaces}/${workspaceId}/${ApiEndpoint.Costs}`, HttpMethod.Get, scopeId, undefined, ResultType.JSON);
const costs = [
...r.costs,
...r.workspace_services,
...r.workspace_services.flatMap((ws: { user_resources: any; }) => [
...ws.user_resources
])
];
workspaceCtx.current.setCosts(costs);
}
}
catch (e: any) {
if (e instanceof APIError) {
if (e.status === 404 /*subscription not supported*/) {
}
          else if (e.status === 429 /*too many requests*/ || e.status === 503 /*service unavailable*/) {
let msg = JSON.parse(e.message);
let retryAfter = Number(msg.error["retry-after"]);
setTimeout(getWorkspaceCosts, retryAfter * 1000);
}
else {
e.userMessage = 'Error retrieving costs';
}
}
else {
e.userMessage = 'Error retrieving costs';
}
setCostApiError(e);
}
};
getWorkspaceCosts();
},[apiCall, workspaceId, wsRoles]);
const addWorkspaceService = (w: WorkspaceService) => {
let ws = [...workspaceServices];
ws.push(w);
setWorkspaceServices(ws);
};
const updateWorkspaceService = (w: WorkspaceService) => {
let i = workspaceServices.findIndex((f: WorkspaceService) => f.id === w.id);
let ws = [...workspaceServices];
ws.splice(i, 1, w);
setWorkspaceServices(ws);
};
const removeWorkspaceService = (w: WorkspaceService) => {
let i = workspaceServices.findIndex((f: WorkspaceService) => f.id === w.id);
let ws = [...workspaceServices];
ws.splice(i, 1);
setWorkspaceServices(ws);
};
switch (loadingState) {
case LoadingState.Ok:
return (
<>
{
costApiError.message &&
<ExceptionLayout e={costApiError} />
}
<WorkspaceHeader />
<Stack horizontal className='tre-body-inner'>
<Stack.Item className='tre-left-nav'>
{!isTREAdminUser && (
<WorkspaceLeftNav
workspaceServices={workspaceServices}
sharedServices={sharedServices}
setWorkspaceService={(ws: WorkspaceService) => setSelectedWorkspaceService(ws)}
addWorkspaceService={(ws: WorkspaceService) => addWorkspaceService(ws)} />
)}
</Stack.Item>
<Stack.Item className='tre-body-content'>
<Stack>
<Stack.Item grow={100}>
<Routes>
<Route path="/" element={<>
<WorkspaceItem />
{!isTREAdminUser ? (
<WorkspaceServices workspaceServices={workspaceServices}
setWorkspaceService={(ws: WorkspaceService) => setSelectedWorkspaceService(ws)}
addWorkspaceService={(ws: WorkspaceService) => addWorkspaceService(ws)}
updateWorkspaceService={(ws: WorkspaceService) => updateWorkspaceService(ws)}
removeWorkspaceService={(ws: WorkspaceService) => removeWorkspaceService(ws)} />
) : (
<Stack className="tre-panel">
<Stack.Item>
<FontIcon iconName="WarningSolid"
className={warningIcon}
/>
You are currently accessing this workspace using the TRE Admin role. Additional functionality requires a workspace role, such as Workspace Owner.
</Stack.Item>
</Stack>
)}
</>}
/>
{!isTREAdminUser && (
<>
<Route path="workspace-services" element={
<WorkspaceServices workspaceServices={workspaceServices}
setWorkspaceService={(ws: WorkspaceService) => setSelectedWorkspaceService(ws)}
addWorkspaceService={(ws: WorkspaceService) => addWorkspaceService(ws)}
updateWorkspaceService={(ws: WorkspaceService) => updateWorkspaceService(ws)}
removeWorkspaceService={(ws: WorkspaceService) => removeWorkspaceService(ws)}
/>
} />
<Route path="workspace-services/:workspaceServiceId/*" element={
<WorkspaceServiceItem
workspaceService={selectedWorkspaceService}
updateWorkspaceService={(ws: WorkspaceService) => updateWorkspaceService(ws)}
removeWorkspaceService={(ws: WorkspaceService) => removeWorkspaceService(ws)} />
} />
<Route path="shared-services" element={
<SharedServices readonly={true} />
} />
<Route path="shared-services/:sharedServiceId/*" element={
<SharedServiceItem readonly={true} />
} />
<Route path="requests/*" element={
<Airlock />
} />
</>
)}
</Routes>
</Stack.Item>
</Stack>
</Stack.Item>
</Stack>
</>
);
case LoadingState.Error:
case LoadingState.AccessDenied:
return (
<ExceptionLayout e={apiError} />
);
default:
return (
<div style={{ marginTop: '20px' }}>
<Spinner label="Loading Workspace" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
);
}
};
const { palette } = getTheme();
const warningIcon = mergeStyles({
color: palette.orangeLight,
fontSize: 18,
marginRight: 8
});
|
AzureTRE/ui/app/src/components/workspaces/WorkspaceProvider.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/workspaces/WorkspaceProvider.tsx",
"repo_id": "AzureTRE",
"token_count": 5199
}
| 140 |
export enum LoadingState {
Ok = 'ok',
Error = 'error',
Loading = 'loading',
  AccessDenied = 'access-denied',
  NotSupported = 'not-supported'
}
|
AzureTRE/ui/app/src/models/loadingState.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/loadingState.ts",
"repo_id": "AzureTRE",
"token_count": 61
}
| 141 |
import { getTheme } from "@fluentui/react";
const { palette } = getTheme();
export const successButtonStyles = {
root: {
background: palette.green,
color: palette.white,
borderColor: palette.green
},
rootDisabled: {
background: 'rgb(16 124 16 / 60%)',
color: palette.white,
borderColor: palette.green,
iconColor: palette.white
},
iconDisabled: {
color: palette.white
}
}
export const destructiveButtonStyles = {
root: {
marginRight: 5,
background: palette.red,
color: palette.white,
borderColor: palette.red
},
rootDisabled: {
background: 'rgb(232 17 35 / 60%)',
color: palette.white,
borderColor: palette.red
},
iconDisabled: {
color: palette.white
}
}
|
AzureTRE/ui/app/src/styles.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/styles.ts",
"repo_id": "AzureTRE",
"token_count": 276
}
| 142 |
import sys
from itertools import groupby
inp_f = sys.argv[1]
out_f = sys.argv[2]
def read_pubtator(file):
file = open(file, "r")
lines = (line.strip() for line in file)
for k, g in groupby(lines, key=bool):
g = list(g)
if g[0]:
yield g
file.close()
def extract_pubtator(lines):
res = []
fixed_lines = [
str_with_null.replace('\x00', '')
for str_with_null in lines[2:]
]
for line in fixed_lines:
sline = line.split('\t')
if sline[1] == 'CID':
res.append(line+'\t1.0')
return res
data = read_pubtator(inp_f)
with open(out_f, 'w') as f:
for sample in data:
lines = extract_pubtator(sample)
lines = '\n'.join(lines)
print(lines[:-1], file=f)
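# A minimal usage sketch (the file names below are hypothetical):
#   python rment.py CDR_TestSet.PubTator.txt relations.tsv
# Each blank-line-separated PubTator block is one document; the first two lines (title and
# abstract) are skipped, and only the tab-separated CID relation lines are kept, each with a
# trailing "1.0" confidence column appended.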
|
BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/data/test/rment.py/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/data/test/rment.py",
"repo_id": "BioGPT",
"token_count": 397
}
| 143 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
data_dir=sys.argv[1]
def build_target_seq(tgt):
tgt = 'the type of this document is ' + tgt + '.'
return tgt
def loader(fname, fn):
ret = []
cnt = 0
file = open(fname)
for line in file:
if line == '\n':
continue
cnt += 1
sent = line.split('\t')
source, target = sent[0].replace('\n', '').strip(), sent[1].replace('\n', '').strip()
if source[-1] == '.':
ret.append([source, fn(target)])
else:
ret.append([source +'.', fn(target)])
print(f"{cnt} samples in {fname} has been processed")
return ret
def dumper(content_list, prefix):
fw_source = open(prefix + ".x", "w")
fw_target = open(prefix + ".y", "w")
for ele in content_list:
print(ele[0], file=fw_source)
print(ele[1], file=fw_target)
fw_source.close()
fw_target.close()
def worker(fname, prefix, fn):
ret = loader(fname, fn)
dumper(ret, prefix)
for split in ['train', 'valid', 'test']:
worker(os.path.join(f"{data_dir}", f"{split}.tsv"), os.path.join(f"{data_dir}", f"ansis_{split}"), build_target_seq)
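# A minimal usage sketch (the directory below is hypothetical):
#   python rebuild_data.py ../../data/DC-HoC
# For each of train/valid/test this reads <data_dir>/<split>.tsv ("text<TAB>label" per line)
# and writes <data_dir>/ansis_<split>.x and .y, where every .y line has the form
# "the type of this document is <label>.".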
|
BioGPT/examples/DC-HoC/rebuild_data.py/0
|
{
"file_path": "BioGPT/examples/DC-HoC/rebuild_data.py",
"repo_id": "BioGPT",
"token_count": 570
}
| 144 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
from src.transformer_lm_prompt import TransformerLanguageModelPrompt
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='')
parser.add_argument("--model_dir", type=str, default=None)
parser.add_argument("--model_file", type=str, default="checkpoint_last.pt")
parser.add_argument("--src_file", type=str, default=None)
parser.add_argument("--output_file", type=str, default=None)
parser.add_argument("--beam", type=int, default=1)
parser.add_argument("--decoding_length", type=int, default=1024)
args, _ = parser.parse_known_args()
def main(args):
src_inputs = []
with open(args.src_file) as reader:
for line in reader:
src_inputs.append(line.strip())
m = TransformerLanguageModelPrompt.from_pretrained(
args.model_dir,
args.model_file,
args.data_dir,
max_len_b=args.decoding_length,
max_tokens=12000,)
print(m.cfg)
if m.cfg.common.fp16:
print('Converting to float 16')
m.half()
m.cuda()
outputs = m.sample(src_inputs, beam=args.beam)
with open(f"{args.output_file}", "w", encoding='utf8') as fw:
for i in range(len(outputs)):
fw.write(outputs[i] + '\n')
if __name__ == "__main__":
main(args)
|
BioGPT/inference.py/0
|
{
"file_path": "BioGPT/inference.py",
"repo_id": "BioGPT",
"token_count": 559
}
| 145 |
0.0.1.dev4
|
BitBLAS/VERSION/0
|
{
"file_path": "BitBLAS/VERSION",
"repo_id": "BitBLAS",
"token_count": 8
}
| 146 |
#!/usr/bin/env bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
nvidia-smi --query-gpu=memory.used --format=csv -lms 500
|
BitBLAS/integration/BitNet/nvidia_measure_memory.sh/0
|
{
"file_path": "BitBLAS/integration/BitNet/nvidia_measure_memory.sh",
"repo_id": "BitBLAS",
"token_count": 50
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Analysis on TIR blocks, loops and functions."""
from typing import List, Optional, Set, Union
from typing_extensions import Literal
from tvm import ir, tir, DataType
from tvm._ffi import get_global_func
from tvm.target.target import Target
from tvm.tir import Schedule, IterVar
from tvm.tir.schedule import BlockRV
class IterInfo:
"""Information about a loop/iter var."""
kind: Literal["S", "R", "O"]
var: tir.Var
_dom: tir.PrimExpr
loop_rv: tir.schedule.LoopRV
def __init__(
self,
kind: Literal["S", "R", "O"],
var: tir.Var,
dom: tir.PrimExpr,
loop_rv: tir.schedule.LoopRV,
):
"""Construct an IterInfo object."""
self.kind = kind
self.var = var
self._dom = dom
self.loop_rv = loop_rv
@property
def dom(self) -> Union[int, tir.PrimExpr]:
"""The iteration domain of the loop."""
return int(self._dom) if isinstance(self._dom, tir.IntImm) else self._dom
def __str__(self) -> str:
return f'Iter("{self.kind}", {self.dom})'
def __repr__(self) -> str:
return str(self)
class BlockInfo:
"""Information about a TIR block."""
name: str
iters: List[IterInfo]
block_rv: tir.schedule.BlockRV
_reduction_block: bool
def __init__(
self,
name: str,
iters: List[IterInfo],
block_rv: tir.schedule.BlockRV,
reduction_block: bool = False,
):
"""Construct a BlockInfo object."""
self.name = name
self.block_rv = block_rv
self.iters = iters
self._reduction_block = reduction_block
def dom(self) -> List[Union[int, tir.PrimExpr]]:
"""The iteration domain of the block."""
return [i.dom for i in self.iters]
def dom_kind(self) -> str:
"""The iteration domain kind of the block, for example, SSSS, SSSR."""
return "".join(i.kind for i in self.iters)
def is_injective(self) -> bool:
"""Whether the block is injective, i.e. all its iteration domains are injective."""
return all(k == "S" for k in self.dom_kind())
def is_elementwise(self, sch: tir.Schedule) -> bool:
"""Whether the block is elementwise, i.e. trivial mapping between read/write region"""
def _check_unit_var_range(dom: ir.Range, var: tir.Var) -> bool:
return dom.min.same_as(var) and dom.extent == 1
if not self.is_injective():
return False
block = sch.get(self.block_rv)
if len(block.reads) != 1 or len(block.writes) != 1:
return False
r_region = block.reads[0].region
w_region = block.writes[0].region
if len(r_region) != len(w_region):
return False
        for iter_var, r_dom, w_dom in zip(block.iter_vars, r_region, w_region):
            if not _check_unit_var_range(r_dom, iter_var.var) or not _check_unit_var_range(w_dom, iter_var.var):
return False
return True
def is_reduction(self) -> bool:
"""Whether the block is a reduction workload."""
# TODO(@junrushao): distinguish GEMV and reduction
return self._reduction_block
def is_gemv(self) -> bool:
"""Whether the block is a GEMV workload."""
raise NotImplementedError
def is_gemm(self) -> bool:
"""Whether the block is a GEMM workload."""
raise NotImplementedError
def __str__(self) -> str:
return f'BlockInfo("{self.name}", "{self.dom_kind()}", {self.dom()})'
def __repr__(self) -> str:
return str(self)
_normalize_prim_func = get_global_func("tir.schedule.NormalizePrimFunc")
def normalize_prim_func(sch: tir.Schedule) -> Optional[List[BlockInfo]]:
"""Normalize the primfunc to normal form"""
try:
result = _normalize_prim_func(sch)
if result is None:
return None
except Exception: # pylint: disable=broad-except
return None
def _iter_kind(i: tir.IterVar) -> str:
return {
tir.IterVar.DataPar: "S",
tir.IterVar.CommReduce: "R",
}.get(i.iter_type, "O")
blocks: List[BlockInfo] = []
for block, loops, iters, is_reduction in zip(*result):
blocks.append(
BlockInfo(
name=sch.get(block).name_hint,
iters=[
IterInfo(
kind=_iter_kind(iter), # type: ignore
var=iter.var,
dom=iter.dom,
loop_rv=loop,
) for loop, iter in zip(loops, iters)
],
block_rv=block,
reduction_block=is_reduction,
))
return blocks
def find_var_from_func(func, var: str):
for buffer in func.buffer_map.values():
for i in buffer.shape:
if isinstance(i, tir.Var) and i.name == var:
return i
return None
def check_func_with_dynamic(func):
for buffer in func.buffer_map.values():
for i in buffer.shape:
if isinstance(i, tir.Var):
return True
return False
def _assert_gpu_target(target: Target):
if "gpu" not in target.keys:
raise ValueError(f"Expect a GPU target, but got {target}")
def get_max_threads_per_block(target: Target) -> int:
_assert_gpu_target(target)
max_threads_per_block = None
for name in ["max_threads_per_block", "max_num_threads"]:
if max_threads_per_block is None:
max_threads_per_block = target.attrs.get(name, None)
if max_threads_per_block is None:
max_threads_per_block = 64
return int(max_threads_per_block)
def get_max_shared_memory_per_block(target: Target) -> int:
_assert_gpu_target(target)
max_shared_memory_per_block = target.attrs.get("max_shared_memory_per_block", None)
if max_shared_memory_per_block is None:
raise ValueError(
f"Cannot find `max_shared_memory_per_block` in {target}, please specify it manually")
return int(max_shared_memory_per_block)
def get_root_block(sch: Schedule, func_name: str = "main") -> BlockRV:
try:
block = sch.mod[func_name].body.block
except Exception:
raise ValueError(f"The function body is expected to be the root block, but got:\n"
f"{sch.mod[func_name].body}") from None
return sch.get_block(block.name_hint)
def collect_block_iter_vars_used_in_access_region(block: tir.Block,
region: List[ir.Range]) -> Set[tir.Var]:
"""Collect the block iter variables used in the access region of a buffer region."""
tir_vars = set()
for expr in region:
if expr.extent != 1:
continue
tir_vars |= collect_vars_used_in_prim_expr(expr.min)
tir_vars &= set(iter_var.var for iter_var in block.iter_vars)
return tir_vars
def collect_vars_used_in_prim_expr(expr: tir.PrimExpr) -> Set[tir.Var]:
"""Collect the variables used in the PrimExpr."""
tir_vars = set()
def _collect_tir_var(expr):
if isinstance(expr, tir.Var):
tir_vars.add(expr)
tir.stmt_functor.post_order_visit(expr, _collect_tir_var)
return tir_vars
def detect_dominant_read(block: tir.Block) -> tir.PrimExpr:
"""Detect the dominant read indices in the block."""
dominant_read = None
num_read_iters = -1
for buffer_region in block.reads:
tir_vars = collect_block_iter_vars_used_in_access_region(block, buffer_region.region)
if num_read_iters < len(tir_vars):
num_read_iters = len(tir_vars)
dominant_read = buffer_region
assert dominant_read is not None
(result,) = dominant_read.buffer.offset_of([e.min for e in dominant_read.region])
return result
def is_broadcast_epilogue(
sch: tir.Schedule,
block: tir.schedule.BlockRV,
epilogue: tir.schedule.BlockRV,
) -> bool:
"""Check if the epilogue block is a broadcast pattern"""
write_buffers = {r.buffer for r in sch.get(block).writes}
epilogue_iters = {i.var: i for i in sch.get(epilogue).iter_vars if i.dom != 1}
for buffer_region in sch.get(epilogue).reads:
if buffer_region.buffer not in write_buffers:
continue
tir_vars = collect_block_iter_vars_used_in_access_region(
sch.get(epilogue), buffer_region.region)
if len(tir_vars) < len(epilogue_iters):
return True
return False
def get_reduction_blocks(sch: tir.Schedule,
blocks: List[tir.schedule.BlockRV]) -> List[tir.schedule.BlockRV]:
# Get the main computation block
def is_reduction(block: BlockRV) -> bool:
block_stmt = sch.get(block)
iter_types = {iter_var.iter_type for iter_var in block_stmt.iter_vars}
return iter_types == {IterVar.CommReduce, IterVar.DataPar}
def is_spatial(block: BlockRV) -> bool:
block_stmt = sch.get(block)
iter_types = {iter_var.iter_type for iter_var in block_stmt.iter_vars}
return iter_types == {IterVar.DataPar}
# NOTE: We assume there is only one reduction block in the function
# all blocks are required to be spatial or reduction
if not all([is_reduction(block) or is_spatial(block) for block in blocks]):
return None
# There is only one reduction block
reduction_blocks = [block for block in blocks if is_reduction(block)]
if len(reduction_blocks) == 0:
return None
return reduction_blocks
def get_coalesced_veclen(block_stmt: tir.Block, target_bits: int = 128) -> int:
# gpu memory prefer 128 bits coalesced access (e.g. four banks)
# 128 bits
buffers: List[tir.Buffer] = []
for read in block_stmt.reads:
buffers.append(read.buffer)
for write in block_stmt.writes:
buffers.append(write.buffer)
# pick the dtype with the largest bits
max_dtype_bits: int = 0
for buffer in buffers:
max_dtype_bits = max(max_dtype_bits, DataType(buffer.dtype).bits)
return target_bits // max_dtype_bits
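# A minimal usage sketch, assuming `mod` is a tvm.IRModule whose "main" PrimFunc is the kernel
# of interest (only entry points defined in this module plus the standard Schedule API are used):
#
#   sch = tvm.tir.Schedule(mod)
#   block_infos = normalize_prim_func(sch)            # None if normalization fails
#   root = get_root_block(sch)
#   reduction_blocks = get_reduction_blocks(sch, sch.get_child_blocks(root))
#   if reduction_blocks:
#       veclen = get_coalesced_veclen(sch.get(reduction_blocks[0]))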
|
BitBLAS/python/bitblas/base/analysis.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/analysis.py",
"repo_id": "BitBLAS",
"token_count": 4460
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from collections import OrderedDict
from typing import Dict, List
from tvm import arith
class Statement():
def __init__(self, output: str, dependent_region: dict, var_map: OrderedDict, range_map: OrderedDict):
self.output = output
self.dependent_region = dependent_region
self.var_map = var_map
self.range_map = range_map
def _merge_two_bounds(x: arith.ConstIntBound, y: arith.ConstIntBound):
return arith.ConstIntBound(min(x.min_value, y.min_value), max(x.max_value, y.max_value))
class InputShapeInference():
def __init__(self, deps: List[Statement]):
self.deps = deps
def _infer(self, shape: Dict[str, List[arith.ConstIntBound]], rstep: Dict[str, int]):
shape = shape.copy()
ana = arith.Analyzer()
for dep in reversed(self.deps):
for var, bound in zip(dep.var_map.values(), shape[dep.output]):
ana.update(var, bound)
for var, bound in dep.range_map.items():
if var.name in rstep:
bound = arith.ConstIntBound(0, min(bound.max_value, rstep[var.name] - 1))
ana.update(var, bound)
for name, regions in dep.dependent_region.items():
for region in regions:
bounds = [ana.const_int_bound(index) for index in region]
if name in shape: # simply merge two bounds
bounds = [_merge_two_bounds(x, y) for x, y in zip(shape[name], bounds)]
shape[name] = bounds
for name, bounds in shape.items():
shape[name] = [c.max_value - c.min_value + 1 for c in bounds]
return shape
def infer(self, shape, rstep: Dict[str, int] = {}):
if isinstance(shape, (list, tuple)):
shape = {"output0" : [arith.ConstIntBound(0, val - 1) for val in shape]}
shape = self._infer(shape, rstep)
return shape
def get_input_exprs(self, output_exprs):
result = output_exprs.copy()
ana = arith.Analyzer()
for dep in reversed(self.deps):
for var, expr in zip(dep.var_map.values(), result[dep.output]):
ana.bind(var, expr)
for var in dep.range_map:
ana.bind(var, 0)
for name, regions in dep.dependent_region.items():
if name in result:
continue
region = regions[0]
input_expr = [ana.simplify(index) for index in region]
result[name] = input_expr
return result
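# A minimal usage sketch, assuming the Statement list `deps` has already been produced by a
# tracing pass elsewhere in the code base:
#
#   shape_infer = InputShapeInference(deps)
#   # Bound the output tile to 128x128 and cap the reduction axis "k" at 16 steps:
#   input_shapes = shape_infer.infer([128, 128], rstep={"k": 16})
#   # `input_shapes` maps each dependent buffer name to the extent of the region it must supply.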
|
BitBLAS/python/bitblas/base/roller/shape_inference/common.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/shape_inference/common.py",
"repo_id": "BitBLAS",
"token_count": 1227
}
| 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from tvm.tir.function import TensorIntrin
from tvm.script import tir as T
from typing import Dict, Literal
from bitblas.quantization import (
_tir_packed_int_to_int_convert,
_tir_packed_to_signed_convert,
_tir_packed_to_unsigned_convert,
_tir_packed_to_unsigned_convert_with_zeros,
)
decode_i4_to_f16 = """
template <typename T1, typename T2, bool isSigned = false>
__device__ void decode_i4b_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64086408 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i4s_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, true>(_i4s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i4u_to_f16(T1 *_i4u, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, false>(_i4u, B_local_decode, N);
}
"""
decode_i4_to_f16_scale = """
template <typename T1, typename T2, typename T3, bool isSigned = false, bool withScaling = false>
__device__ void decode_i4b_to_f16_scale(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64086408 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4s_to_f16_scale(T1 *_i4s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale<T1, T2, T3, true, true>(_i4s, B_local_decode, N, scale);
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4u_to_f16_scale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale<T1, T2, T3, false, true>(_i4u, B_local_decode, N, scale);
}
"""
decode_i4_to_f16_scale_zeros_original = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_zeros_original(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64086408 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint const packed_zeros = __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_zeros));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i4u_to_f16_scale_zeros_original(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_zeros_original<T1, T2, T3, T4, false>(_i4u, B_local_decode, N, scale, zeros);
}
"""
decode_i4_to_f16_scale_zeros_rescale = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_rescale(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64086408 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint const packed_zeros = 0x80008000 | __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(packed_zeros));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i4u_to_f16_scale_zeros_rescale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_rescale<T1, T2, T3, T4, false>(_i4u, B_local_decode, N, scale, zeros);
}
"""
decode_i4_to_f16_scale_zeros_quantized = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_quantized(T1 *_i4s, T2 *B_local_decode, const int N = 8, const T3 *scale = nullptr, const T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
uint const i4s = *reinterpret_cast<uint *>(_i4s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint median_num = ((0xe400 | zero_r) << 16) | (0xe400 | zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(median_num));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename storage_dtype, typename target_dtype, typename scale_dtype, typename zero_dtype>
__device__ void decode_i4u_to_f16_scale_zeros_quantized(storage_dtype *_i4u, target_dtype *B_local_decode, scale_dtype *scale = nullptr, zero_dtype *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_quantized<storage_dtype, target_dtype, scale_dtype, zero_dtype, false>(_i4u, B_local_decode, N, scale, zeros);
}
"""
decode_i2_to_f16 = """
template <typename T1, typename T2, bool isSigned = false>
__device__ void decode_i2b_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64026402 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
// otherwise the pointer of _i2s should be moved to
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i2s_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, true>(_i2s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16(T1 *_i2u, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false>(_i2u, B_local_decode, N);
}
"""
decode_i2_to_f16_scale = """
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64026402 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
// otherwise the pointer of _i2s should be moved to
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2s_to_f16_scale(T1 *_i2s, T2 *B_local_decode, T3 *scale, const int N = 8)
{
decode_i2b_to_f16_scale<T1, T2, T3, true>(_i2s, B_local_decode, scale, N);
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale(T1 *_i2u, T2 *B_local_decode, T3 *scale, const int N = 8)
{
decode_i2b_to_f16_scale<T1, T2, T3, false>(_i2u, B_local_decode, scale, N);
}
"""
decode_i2_to_f16_scale_zeros_original = """
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_original(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64026402 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
// otherwise the pointer of _i2s should be moved to
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale_zeros_original(T1 *_i2u, T2 *B_local_decode, T3 *scale, T3 *zeros, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_original<T1, T2, T3, false>(_i2u, B_local_decode, scale, zeros, N);
}
"""
decode_i2_to_f16_scale_zeros_rescale = """
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_rescale(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64026402 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
// otherwise the pointer of _i2s should be moved to
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale_zeros_rescale(T1 *_i2u, T2 *B_local_decode, T3 *scale, T3 *zeros, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_rescale<T1, T2, T3, false>(_i2u, B_local_decode, scale, zeros, N);
}
"""
decode_i2_to_f16_scale_zeros_quantized = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_quantized(T1 *_i2s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint median_num = ((0xe400 | zero_r) << 16) | (0xe400 | zero_r);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
// otherwise the pointer of _i2s should be moved to
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(median_num));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i2u_to_f16_scale_zeros_quantized(T1 *_i2u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_quantized<T1, T2, T3, T4, false>(_i2u, B_local_decode, N, scale, zeros);
}
"""
decode_i1_to_f16 = """
template <typename T1, typename T2>
__device__ void decode_i1u_to_f16(T1 *_i1s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i1s_to_f16(T1 *_i1s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
static constexpr uint TRANSFORM_SUBTRACT = 0xbc00bc00; // for signed int 2x - 1
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(h[i]));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(TRANSFORM_SUBTRACT));
}
}
"""
decode_i1_to_f16_scale = """
template <typename T1, typename T2, typename T3>
__device__ void decode_i1u_to_f16_scale(T1 *_i1s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode e7,e5,e3,e1,e8,e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i1s_to_f16_scale(T1 *_i1s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
static constexpr uint TRANSFORM_SUBTRACT = 0xbc00bc00; // for signed int 2x - 1
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode e7,e5,e3,e1,e8,e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(h[i]));
asm volatile("add.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(TRANSFORM_SUBTRACT));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
"""
decode_i1_to_f16_scale_zeros_original = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i1b_to_f16_zeros_original(T1 *_i1s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode e7,e5,e3,e1,e8,e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
// input zeros maybe int32(qzeros) or half format
T4 const zero_r = *zeros;
uint const packed_zeros = __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_zeros));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(0));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i1u_to_f16_scale_zeros_original(T1 *_i1u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
decode_i1b_to_f16_zeros_original<T1, T2, T3, T4, false>(_i1u, B_local_decode, N, scale, zeros);
}
"""
decode_i1_to_f16_scale_zeros_rescale = """
template <typename T1, typename T2, typename T3, typename T4, bool isSigned = false>
__device__ void decode_i1b_to_f16_scale_zeros_rescale(T1 *_i1s, T2 *B_local_decode, const int N = 8, T3 *scale = nullptr, T4 *zeros = nullptr)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00010001;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = 0x64006400;
// interleave {e31,e29,e27,e25,e23,e21,e19,e17,e15,e13,e11,e9,e7,e5,e3,e1,e30,e28,e26,e24,e22,e20,e18,e16,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode e7,e5,e3,e1,e8,e6,e4,e2,e0
int8_t const i1s_i16 = *reinterpret_cast<int8_t *>(_i1s);
int i1s = (i1s_i16 & 0x0f);
i1s |= ((i1s_i16 & 0xf0) << 12);
T3 const scale_r = *scale;
uint const packed_scales = __pack_half2(scale_r, scale_r);
T4 const zero_r = *zeros;
uint const packed_zeros = 0x80008000 | __pack_half2(zero_r, zero_r);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(h[i])
: "r"(i1s >> (1 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\\n" : "=r"(h[i]) : "r"(h[i]), "r"(packed_scales), "r"(packed_zeros));
}
}
template <typename T1, typename T2, typename T3, typename T4>
__device__ void decode_i1u_to_f16_scale_zeros_rescale(T1 *_i1u, T2 *B_local_decode, T3 *scale = nullptr, T4 *zeros = nullptr, const int N = 8)
{
    decode_i1b_to_f16_scale_zeros_rescale<T1, T2, T3, T4, false>(_i1u, B_local_decode, N, scale, zeros);
}
"""
decode_i1s_to_i8s = """template <typename T1, typename T2>
__device__ void decode_i1s_to_i8s(T1 *_i1b, T2 *_i8s, const int N = 16)
{
int i8s[4];
// vector load
*reinterpret_cast<int4 *>(i8s) = *reinterpret_cast<int4 *>(_i8s);
int16_t i1b_i16 = *reinterpret_cast<int16_t *>(_i1b);
// permutate: {e0,e4,e8,e12,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15}
// into: {e0,e4,e8,e12,x,x,x,x,e1,e5,e9,x,x,x,x,e13,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15,x,x,x,x}
int i1b = (i1b_i16 & 0x0f0f);
i1b |= ((i1b_i16 & 0xf0f0) << 12);
// i1b {0..,e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {0..,e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
    // First, we extract the i1b values and construct an intermediate int8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa; // 0b11101010
static constexpr uint BOTTOM_MASK = 0x01010101; // 0x1 -> 0b01 select 0,1
static constexpr uint I8s_MAGIC_NUM = 0x00000000;
static constexpr uint TRANSFORM_SUBTRACT = 0xffffffff; // for signed int 2x - 1
for (int i = 0; i < N / 4; i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i1b >> i), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
i8s[i] = __vadd4(i8s[i], i8s[i]);
i8s[i] = __vadd4(i8s[i], TRANSFORM_SUBTRACT);
}
*reinterpret_cast<int4 *>(_i8s) = *reinterpret_cast<int4 *>(i8s);
}
template <typename T1, typename T2>
__device__ void decode_i1u_to_i8s(T1 *_i1b, T2 *_i8s, const int N = 16)
{
int *i8s = reinterpret_cast<int *>(_i8s);
int16_t i1b_i16 = *reinterpret_cast<int16_t *>(_i1b);
// permutate: {e0,e4,e8,e12,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15}
// into: {e0,e4,e8,e12,x,x,x,x,e1,e5,e9,x,x,x,x,e13,e2,e6,e10,e14,e1,e5,e9,e13,e3,e7,e11,e15,x,x,x,x}
int i1b = (i1b_i16 & 0x0f0f);
i1b |= ((i1b_i16 & 0xf0f0) << 12);
// i1b {0..,e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0}
// interleave {0..,e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
    // First, we extract the i1b values and construct an intermediate int8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa; // 0b11101010
static constexpr uint BOTTOM_MASK = 0x01010101; // 0x1 -> 0b01 select 0,1
static constexpr uint I8s_MAGIC_NUM = 0x00000000;
static constexpr uint MEDIAN_NUM = 0x00000000;
for (int i = 0; i < N / 4; i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i1b >> i), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
}
}
"""
decode_i2s_to_i8s = """template <typename T1, typename T2>
__device__ void decode_i2s_to_i8s(T1 *_i2b, T2 *_i8s, const int N = 16)
{
// convert 8 int2b_t to 8 int8b_t -> 2 int32
uint *i8s = reinterpret_cast<uint *>(_i8s);
// i2b = {e7,e6,e5,e4,e3,e2,e1,e0}
// also require interleave {e7,e3,e6,e2,e5,e1,e4,e0}
uint const i2b = *reinterpret_cast<uint *>(_i2b);
    // First, we extract the i2b values and construct an intermediate int8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa; // 0b11101010
static constexpr uint BOTTOM_MASK = 0x03030303; // 0xf -> 0b11 select 0,3
    static constexpr uint I8s_MAGIC_NUM = 0x00000000; // 0 (no magic bias for the int8 path)
static constexpr uint MEDIAN_NUM = 0x02020202;
#pragma unroll
for (int i = 0; i < (N / 4); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i2b >> (2 * i)), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
i8s[i] = __vsub4(i8s[i], MEDIAN_NUM);
}
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_i8s(T1 *_i2b, T2 *_i8s, const int N = 16)
{
// convert 8 int2b_t to 8 int8b_t -> 2 int32
uint *i8s = reinterpret_cast<uint *>(_i8s);
// i2b = {e7,e6,e5,e4,e3,e2,e1,e0}
// also require interleave {e7,e3,e6,e2,e5,e1,e4,e0}
uint const i2b = *reinterpret_cast<uint *>(_i2b);
    // First, we extract the i2b values and construct an intermediate int8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa; // 0b11101010
static constexpr uint BOTTOM_MASK = 0x03030303; // 0xf -> 0b11 select 0,3
    static constexpr uint I8s_MAGIC_NUM = 0x00000000; // 0 (no magic bias for the int8 path)
#pragma unroll
for (int i = 0; i < (N / 4); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i2b >> (2 * i)), "n"(BOTTOM_MASK), "n"(I8s_MAGIC_NUM), "n"(immLut));
}
}
"""
decode_i4s_to_i8s = """template <typename T1, typename T2>
__device__ void decode_i4s_to_i8s(T1 *_i4b, T2 *_i8s, const int N = 16)
{
uint *i8s = reinterpret_cast<uint *>(_i8s);
uint *i4b = reinterpret_cast<uint *>(_i4b);
// First, we extract the i4s and construct an intermediate i8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x0f0f0f0f; // 0xf -> 0b1111 select 0,4,8,12
static constexpr uint I4b_TO_I8s_MAGIC_NUM = 0x00000000; // 0
static constexpr uint MEDIAN_NUM = 0x07070707;
#pragma unroll
for (int i = 0; i < (N / 8); i++)
{
        // Extract one nibble per byte: (i4b & 0x0f0f0f0f) | 0x00000000
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i4b[0] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i + 2])
: "r"(i4b[1] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
i8s[i] = __vsubss4(i8s[i], MEDIAN_NUM);
i8s[i + 2] = __vsubss4(i8s[i + 2], MEDIAN_NUM);
}
}
template <typename T1, typename T2>
__device__ void decode_i4u_to_i8s(T1 *_i4b, T2 *_i8s, const int N = 16)
{
uint *i8s = reinterpret_cast<uint *>(_i8s);
uint *i4b = reinterpret_cast<uint *>(_i4b);
// First, we extract the i4s and construct an intermediate i8 number.
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x0f0f0f0f; // 0xf -> 0b1111 select 0,4,8,12
static constexpr uint I4b_TO_I8s_MAGIC_NUM = 0x00000000; // 0
#pragma unroll
for (int i = 0; i < (N / 8); i++)
{
        // Extract one nibble per byte: (i4b & 0x0f0f0f0f) | 0x00000000
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i])
: "r"(i4b[0] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\\n"
: "=r"(i8s[i + 2])
: "r"(i4b[1] >> (4 * i)), "n"(BOTTOM_MASK), "n"(I4b_TO_I8s_MAGIC_NUM), "n"(immLut));
}
}
"""
def get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
source_format="uint",
target_dtype="float16",
loops_extent=8,
with_scale=False,
with_zeros=False,
zeros_mode="original",
):
"""
    loops_extent is the number of elements to be decoded in one stage.
    For memory-friendly access, loops_extent should be a multiple of the number of elements
    packed into one storage word. For int1, it is not possible to decode 8 elements in one stage, so 16 is used instead.
"""
if target_dtype == "float16":
d4f = "f16"
elif target_dtype == "int8":
d4f = "i8s"
else:
raise ValueError("Unsupported target dtype: {}".format(target_dtype))
source_symbol = "u" if source_format == "uint" else "s"
func_name = "decode_i{}{}_to_{}".format(source_bit, source_symbol, d4f)
if with_scale:
func_name += "_scale"
if with_zeros:
func_name += f"_zeros_{zeros_mode}"
assert storage_dtype in ["int8", "int32", "uint32"]
storage_nbit = int("".join(c for c in storage_dtype if c.isdigit()))
storage_type = str("".join(c for c in storage_dtype if not c.isdigit()))
elem_per_unit = storage_nbit // source_bit
n_storage_elems = loops_extent // elem_per_unit
if with_zeros and zeros_mode == "quantized":
decode_func = _tir_packed_to_unsigned_convert_with_zeros(storage_type, storage_nbit)
elif source_format == "int":
if source_bit == 1:
decode_func = _tir_packed_int_to_int_convert(storage_type, storage_nbit)
else:
decode_func = _tir_packed_to_signed_convert(storage_type, storage_nbit)
elif source_format == "uint":
decode_func = _tir_packed_to_unsigned_convert(storage_type, storage_nbit)
else:
raise ValueError("Unsupported source_format: {}".format(source_format))
if with_scale is False:
@T.prim_func
def fast_decode_desc(compressed: T.handle, decompressed: T.handle) -> None:
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems])
T.writes(Decompressed[0:loops_extent])
for i in T.grid(loops_extent):
with T.block("decode"):
vi = T.axis.remap("S", [i])
Decompressed[vi] = decode_func(
source_bit,
Compressed[vi // elem_per_unit],
vi % elem_per_unit,
dtype=target_dtype,
)
@T.prim_func
def fast_decode_impl(compressed: T.handle, decompressed: T.handle) -> None:
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems])
T.writes(Decompressed[0:loops_extent])
T.call_extern(
"handle",
func_name,
Compressed.data,
Decompressed.data,
loops_extent,
)
elif with_zeros is False:
@T.prim_func
def fast_decode_desc(compressed: T.handle, decompressed: T.handle, scale: T.handle) -> None:
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
scope="global",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems], Scale[0:1])
T.writes(Decompressed[0:loops_extent])
for i in T.grid(loops_extent):
with T.block("decode"):
vi = T.axis.remap("S", [i])
Decompressed[vi] = (
decode_func(
source_bit,
Compressed[vi // elem_per_unit],
vi % elem_per_unit,
dtype=target_dtype,
) * Scale[0])
@T.prim_func
def fast_decode_impl(compressed: T.handle, decompressed: T.handle, scale: T.handle) -> None:
s0 = T.int32()
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
offset_factor=1,
strides=[s0],
scope="global",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems], Scale[0:1])
T.writes(Decompressed[0:loops_extent])
T.call_extern(
"handle",
func_name,
Compressed.data,
Decompressed.data,
Scale.access_ptr("r"),
loops_extent,
)
elif zeros_mode == "quantized":
def get_dequantize_buffers_list(weight, scale, zeros, zeros_mode="original"):
if zeros_mode == "original":
return [weight, zeros, scale]
elif zeros_mode == "rescale":
return [weight, scale, zeros]
elif zeros_mode == "quantized":
return [weight, zeros, scale]
else:
raise ValueError(f"Unsupported zeros_mode: {zeros_mode}")
def get_dequantize_func(weight, scale, zeros, zeros_mode="original"):
if zeros_mode == "original":
return (weight - zeros) * scale
elif zeros_mode == "rescale":
return weight * scale - zeros
elif zeros_mode == "quantized":
return weight * scale
else:
raise ValueError(f"Unsupported zeros_mode: {zeros_mode}")
# Scale with Zeros
@T.prim_func
def fast_decode_desc(
compressed: T.handle,
decompressed: T.handle,
scale: T.handle,
zeros: T.handle,
) -> None:
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
scope="local",
)
Zeros = T.match_buffer(
zeros,
[
1,
],
dtype=storage_dtype,
scope="local",
)
with T.block("root"):
T.reads(*get_dequantize_buffers_list(
Compressed[0:n_storage_elems],
Scale[0:1],
Zeros[0:1],
zeros_mode=zeros_mode,
))
T.writes(Decompressed[0:loops_extent])
for i in T.grid(loops_extent):
with T.block("decode"):
vi = T.axis.remap("S", [i])
Decompressed[vi] = get_dequantize_func(
decode_func(
source_bit,
Compressed[vi // elem_per_unit],
vi % elem_per_unit,
Zeros[0],
dtype=target_dtype,
),
Scale[0],
Zeros[0],
zeros_mode,
)
@T.prim_func
def fast_decode_impl(
compressed: T.handle,
decompressed: T.handle,
scale: T.handle,
zeros: T.handle,
) -> None:
s0 = T.int32()
s1 = T.int32()
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
offset_factor=1,
strides=[s0],
scope="local",
)
Zeros = T.match_buffer(
zeros,
[
1,
],
dtype=storage_dtype,
offset_factor=1,
strides=[s1],
scope="local",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1])
T.writes(Decompressed[0:loops_extent])
T.call_extern(
"handle",
func_name,
Compressed.data,
Decompressed.data,
Scale.access_ptr("r"),
Zeros.access_ptr("r"),
loops_extent,
)
else:
def get_dequantize_buffers_list(weight, scale, zeros, zeros_mode="original"):
if zeros_mode == "original":
return [weight, zeros, scale]
elif zeros_mode == "rescale":
return [weight, scale, zeros]
else:
raise ValueError(f"Unsupported zeros_mode: {zeros_mode}")
def get_dequantize_func(weight, scale, zeros, zeros_mode="original"):
if zeros_mode == "original":
return (weight - zeros) * scale
elif zeros_mode == "rescale":
return weight * scale - zeros
else:
raise ValueError(f"Unsupported zeros_mode: {zeros_mode}")
# Scale with Zeros
@T.prim_func
def fast_decode_desc(
compressed: T.handle,
decompressed: T.handle,
scale: T.handle,
zeros: T.handle,
) -> None:
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
scope="global",
)
Zeros = T.match_buffer(
zeros,
[
1,
],
dtype=target_dtype,
scope="global",
)
with T.block("root"):
T.reads(*get_dequantize_buffers_list(
Compressed[0:n_storage_elems],
Scale[0:1],
Zeros[0:1],
zeros_mode=zeros_mode,
))
T.writes(Decompressed[0:loops_extent])
for i in T.grid(loops_extent):
with T.block("decode"):
vi = T.axis.remap("S", [i])
Decompressed[vi] = get_dequantize_func(
decode_func(
source_bit,
Compressed[vi // elem_per_unit],
vi % elem_per_unit,
dtype=target_dtype,
),
Scale[0],
Zeros[0],
zeros_mode,
)
@T.prim_func
def fast_decode_impl(
compressed: T.handle,
decompressed: T.handle,
scale: T.handle,
zeros: T.handle,
) -> None:
s0 = T.int32()
s1 = T.int32()
Compressed = T.match_buffer(
compressed,
[
n_storage_elems,
],
dtype=storage_dtype,
scope="local",
)
Decompressed = T.match_buffer(
decompressed,
[
loops_extent,
],
dtype=target_dtype,
scope="local",
)
Scale = T.match_buffer(
scale,
[
1,
],
dtype=target_dtype,
offset_factor=1,
strides=[s0],
scope="global",
)
Zeros = T.match_buffer(
zeros,
[
1,
],
dtype=target_dtype,
offset_factor=1,
strides=[s1],
scope="global",
)
with T.block("root"):
T.reads(Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1])
T.writes(Decompressed[0:loops_extent])
T.call_extern(
"handle",
func_name,
Compressed.data,
Decompressed.data,
Scale.access_ptr("r"),
Zeros.access_ptr("r"),
loops_extent,
)
return fast_decode_desc, fast_decode_impl
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_INTRIN = ("lop3_fast_decode_u4_to_int8_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=4, storage_dtype="int8", target_dtype="float16", loops_extent=8),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_INTRIN = ("lop3_fast_decode_u2_to_int8_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=2, storage_dtype="int8", target_dtype="float16", loops_extent=8),
)
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_INTRIN = ("lop3_fast_decode_u1_to_int8_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=1, storage_dtype="int8", target_dtype="float16", loops_extent=8),
)
LOP3_FAST_DECODE_UINT4_TO_INT32_TO_FP16_L8_INTRIN = ("lop3_fast_decode_u4_to_int32_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT32_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=4, storage_dtype="int32", target_dtype="float16", loops_extent=8),
)
LOP3_FAST_DECODE_UINT4_TO_INT32_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_u4_to_int32_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT32_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int32",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_UINT4_TO_UINT32_TO_FP16_L8_INTRIN = ("lop3_fast_decode_u4_to_uint32_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_UINT32_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=4, storage_dtype="uint32", target_dtype="float16", loops_extent=8),
)
LOP3_FAST_DECODE_UINT4_TO_UINT32_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_u4_to_uint32_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_UINT32_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="uint32",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_u4_to_int8_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN = (
"lop3_fast_decode_u4_to_int8_to_f16_l8_scale_zeros_original_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="original",
),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN = (
"lop3_fast_decode_u4_to_int8_to_f16_l8_scale_zeros_rescale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="rescale",
),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_QUANTIZED_INTRIN = (
"lop3_fast_decode_u4_to_int8_to_f16_l8_scale_zeros_quantized_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_FP16_L8_SCALE_ZEROS_QUANTIZED_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="quantized",
),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_u2_to_int8_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN = (
"lop3_fast_decode_u2_to_int8_to_f16_l8_scale_zeros_original_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="original",
),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN = (
"lop3_fast_decode_u2_to_int8_to_f16_l8_scale_zeros_rescale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="rescale",
),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_QUANTIZED_INTRIN = (
"lop3_fast_decode_u2_to_int8_to_f16_l8_scale_zeros_quantized_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_FP16_L8_SCALE_ZEROS_QUANTIZED_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="quantized",
),
)
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_u1_to_int8_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=1,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN = (
"lop3_fast_decode_u1_to_int8_to_f16_l8_scale_zeros_original_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_ZEROS_ORIGINAL_INTRIN,
*get_fast_decode_intrin(
source_bit=1,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="original",
),
)
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN = (
"lop3_fast_decode_u1_to_int8_to_f16_l8_scale_zeros_rescale_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_FP16_L8_SCALE_ZEROS_RESCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=1,
storage_dtype="int8",
target_dtype="float16",
loops_extent=8,
with_scale=True,
with_zeros=True,
zeros_mode="rescale",
),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_INT8_L8_INTRIN = ("lop3_fast_decode_u4_to_int8_to_i8_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_INT8_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=4, storage_dtype="int8", target_dtype="int8", loops_extent=8),
)
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_INT8_L16_INTRIN = ("lop3_fast_decode_u4_to_int8_to_i8_l16_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT4_TO_INT8_TO_INT8_L16_INTRIN,
*get_fast_decode_intrin(
source_bit=4, storage_dtype="int8", target_dtype="int8", loops_extent=16),
)
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_INT8_L16_INTRIN = ("lop3_fast_decode_u2_to_int8_to_i8_l16_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT2_TO_INT8_TO_INT8_L16_INTRIN,
*get_fast_decode_intrin(
source_bit=2, storage_dtype="int8", target_dtype="int8", loops_extent=16),
)
LOP3_FAST_DECODE_INT2_TO_INT8_TO_INT8_L16_INTRIN = ("lop3_fast_decode_i2_to_int8_to_i8_l16_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT2_TO_INT8_TO_INT8_L16_INTRIN,
*get_fast_decode_intrin(
source_bit=2, source_format="int", storage_dtype="int8", target_dtype="int8", loops_extent=16),
)
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_INT8_L16_INTRIN = ("lop3_fast_decode_u1_to_int8_to_i8_l16_")
TensorIntrin.register(
LOP3_FAST_DECODE_UINT1_TO_INT8_TO_INT8_L16_INTRIN,
*get_fast_decode_intrin(
source_bit=1, storage_dtype="int8", target_dtype="int8", loops_extent=16),
)
LOP3_FAST_DECODE_INT1_TO_INT8_TO_INT8_L16_INTRIN = ("lop3_fast_decode_i1_to_int8_to_i8_l16_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT1_TO_INT8_TO_INT8_L16_INTRIN,
*get_fast_decode_intrin(
source_bit=1, source_format="int", storage_dtype="int8", target_dtype="int8", loops_extent=16),
)
LOP3_FAST_DECODE_INT4_TO_INT8_TO_FP16_L8_INTRIN = ("lop3_fast_decode_i4_to_int8_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT4_TO_INT8_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
source_format="int",
target_dtype="float16",
loops_extent=8,
),
)
LOP3_FAST_DECODE_INT4_TO_INT8_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_i4_to_int8_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT4_TO_INT8_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=4,
storage_dtype="int8",
source_format="int",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
LOP3_FAST_DECODE_INT2_TO_INT8_TO_FP16_L8_INTRIN = ("lop3_fast_decode_i2_to_int8_to_f16_l8_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT2_TO_INT8_TO_FP16_L8_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
source_format="int",
target_dtype="float16",
loops_extent=8,
),
)
LOP3_FAST_DECODE_INT2_TO_INT8_TO_FP16_L8_SCALE_INTRIN = (
"lop3_fast_decode_i2_to_int8_to_f16_l8_scale_")
TensorIntrin.register(
LOP3_FAST_DECODE_INT2_TO_INT8_TO_FP16_L8_SCALE_INTRIN,
*get_fast_decode_intrin(
source_bit=2,
storage_dtype="int8",
source_format="int",
target_dtype="float16",
loops_extent=8,
with_scale=True,
),
)
def get_lop3_intrin_group(
out_dtype: Literal["float16", "int8"],
source_format: Literal["int", "uint"] = "uint",
source_bit: int = 4,
storage_dtype: Literal["int32", "int8"] = "int8",
with_scaling: bool = False,
with_zeros: bool = False,
zeros_mode: Literal["original", "rescale", "quantized"] = "original",
) -> Dict[str, str]:
"""
    This function is used to get the intrinsic group of the LOP3 operation, which uses fast
    decoding to reduce the overhead of weight dequantization. LOP3 is a type of logic operation
    that takes three inputs. The intrinsic group refers to the set of intrinsic operations
    (the C source to inject and the registered TensorIR intrinsic) generated for a given configuration.
    Parameters
    ----------
    out_dtype : Literal["float16", "int8"]
        The data type of the output. It can be either "float16" or "int8".
    source_format : Literal["int", "uint"], optional
        Whether the quantized source elements are signed or unsigned. By default, it is "uint".
    source_bit : int, optional
        The bit width of each quantized source element. By default, it is 4.
    storage_dtype : Literal["int32", "int8"], optional
        The data type in which the quantized elements are packed. By default, it is "int8".
    with_scaling : bool, optional
        Whether a scale factor is applied during decoding. By default, it is False.
    with_zeros : bool, optional
        Whether zero points are applied during decoding. By default, it is False.
    zeros_mode : Literal["original", "rescale", "quantized"], optional
        How zero points are combined with scales during decoding. By default, it is "original".
Returns
-------
Dict[str, str]
A dictionary mapping the names of the intrinsics to their corresponding implementations.
"""
assert out_dtype in ["float16", "int8"]
dtype_mapping = {"float16": "f16", "int8": "i8", "int32": "i32"}
target_dtype = dtype_mapping[out_dtype]
target_bits = tvm.DataType(out_dtype).bits
loop_extent = 128 // target_bits
if source_format not in ["int", "uint"]:
raise ValueError("Invalid source_format. Expected 'int' or 'uint'.")
source_symbol = "i" if source_format == "int" else "u"
_intrin = f"lop3_fast_decode_{source_symbol}{source_bit}_to_{storage_dtype}_to_{target_dtype}_l{loop_extent}_"
if with_scaling:
_intrin += "scale_"
if with_zeros:
_intrin += f"zeros_{zeros_mode}_"
import_c_map = {
"i4_to_f16": decode_i4_to_f16,
"i2_to_f16": decode_i2_to_f16,
"i1_to_f16": decode_i1_to_f16,
"i4_to_f16_scale": decode_i4_to_f16_scale,
"i2_to_f16_scale": decode_i2_to_f16_scale,
"i1_to_f16_scale": decode_i1_to_f16_scale,
"i4_to_f16_scale_zeros_original": decode_i4_to_f16_scale_zeros_original,
"i2_to_f16_scale_zeros_original": decode_i2_to_f16_scale_zeros_original,
"i1_to_f16_scale_zeros_original": decode_i1_to_f16_scale_zeros_original,
"i4_to_f16_scale_zeros_rescale": decode_i4_to_f16_scale_zeros_rescale,
"i2_to_f16_scale_zeros_rescale": decode_i2_to_f16_scale_zeros_rescale,
"i1_to_f16_scale_zeros_rescale": decode_i1_to_f16_scale_zeros_rescale,
"i4_to_f16_scale_zeros_quantized": decode_i4_to_f16_scale_zeros_quantized,
"i2_to_f16_scale_zeros_quantized": decode_i2_to_f16_scale_zeros_quantized,
"i1_to_i8": decode_i1s_to_i8s,
"i2_to_i8": decode_i2s_to_i8s,
"i4_to_i8": decode_i4s_to_i8s,
}
key = f"i{source_bit}_to_{target_dtype}"
if with_scaling:
key += "_scale"
if with_zeros:
key += f"_zeros_{zeros_mode}"
return {
"c_source": import_c_map[key],
"compute": _intrin,
}
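# Usage sketch (illustrative, not part of the original module): fetch the uint4 -> float16
# decoding group with scaling enabled. The returned dict holds the CUDA C source to inject
# ("c_source") and the registered TensorIR intrinsic name to tensorize with ("compute").
# intrin_group = get_lop3_intrin_group(
#     out_dtype="float16", source_format="uint", source_bit=4,
#     storage_dtype="int8", with_scaling=True)
# c_source, intrin_name = intrin_group["c_source"], intrin_group["compute"]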
|
BitBLAS/python/bitblas/gpu/intrin/lop3.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/intrin/lop3.py",
"repo_id": "BitBLAS",
"token_count": 34970
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Literal
from tvm import DataType
from tvm import IRModule
from tvm.ir import GlobalVar
from tvm.script import tir as T
# fmt: off
# TIR interleave weight implementation (2D)
def tir_interleave_weight(
N: int = 2,
K: int = 16,
bits: int = 4,
QK: int = -1,
target_dtype: str = "float16",
storage_dtype: str = "int32",
):
if QK == -1:
QK = K * bits // 32
bits_stride = DataType(target_dtype).bits
mask = (1 << bits) - 1 # for 4bit the val is 0x0000000f
num_groups = 32 // bits_stride
elems_per_group = bits_stride // bits
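    # Worked example (illustrative): for target_dtype="float16" and bits=4, bits_stride is 16,
    # so num_groups = 32 // 16 = 2 and elems_per_group = 16 // 4 = 4; each int32 word therefore
    # holds 2 groups of 4 nibbles that the loops below re-order.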
@T.prim_func
def interleave_weight(A: T.Buffer((N, QK), storage_dtype), B: T.Buffer((N, QK), storage_dtype)):
for ax0, ax1, ax2, ax3 in T.grid(N, QK, num_groups, elems_per_group):
with T.block("B"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
offset = v2 * elems_per_group + v3
shift = (offset % num_groups) * bits_stride + (offset // num_groups) * bits
B[v0, v1] = B[v0, v1] | (((A[v0, v1] >> (bits * offset)) & mask) << shift)
@T.prim_func
def interleave_weight_f16_2b(A: T.Buffer((N, QK), storage_dtype), B: T.Buffer((N, QK),
storage_dtype)):
B_tmp_1 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_2 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_3 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
for ax0, ax1, ax2, ax3 in T.grid(N, QK, num_groups, elems_per_group):
with T.block("B_tmp"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
offset = v2 * elems_per_group + v3
shift = (offset % num_groups) * bits_stride + (offset // num_groups) * bits
B[v0, v1] = B[v0, v1] | (((A[v0, v1] >> (bits * offset)) & mask) << shift)
for ax0, ax1 in T.grid(N, QK):
with T.block("B"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
B_tmp_1[v0, v1] = B[v0, v1] & T.uint32(0xFF0000FF)
B_tmp_2[v0, v1] = ((B[v0, v1] & T.uint32(0x00FF0000)) << 8) >> 16
B_tmp_3[v0, v1] = ((B[v0, v1] & T.uint32(0x0000FF00)) << 16) >> 8
B[v0, v1] = B_tmp_1[v0, v1] | B_tmp_2[v0, v1] | B_tmp_3[v0, v1]
@T.prim_func
def interleave_weight_f16_1b(A: T.Buffer((N, QK), storage_dtype), B: T.Buffer((N, QK),
storage_dtype)):
B_tmp_1 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_2 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_3 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_4 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_5 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_6 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_7 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
for ax0, ax1, ax2, ax3 in T.grid(N, QK, num_groups, elems_per_group):
with T.block("B_tmp"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
offset = v2 * elems_per_group + v3
shift = (offset % num_groups) * bits_stride + (offset // num_groups) * bits
B[v0, v1] = B[v0, v1] | (((A[v0, v1] >> (bits * offset)) & mask) << shift)
for ax0, ax1 in T.grid(N, QK):
with T.block("B"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
B_tmp_1[v0, v1] = B[v0, v1] & T.uint32(0xF000000F)
B_tmp_2[v0, v1] = ((B[v0, v1] & T.uint32(0x000000F0)) >> 4) << 8
B_tmp_3[v0, v1] = ((B[v0, v1] & T.uint32(0x00000F00)) >> 8) << 16
B_tmp_4[v0, v1] = ((B[v0, v1] & T.uint32(0x0000F000)) >> 12) << 24
B_tmp_5[v0, v1] = ((B[v0, v1] & T.uint32(0x000F0000)) >> 16) << 8
B_tmp_6[v0, v1] = ((B[v0, v1] & T.uint32(0x00F00000)) >> 20) << 12
                B_tmp_7[v0, v1] = ((B[v0, v1] & T.uint32(0x0F000000)) >> 24) << 20
B[v0, v1] = (
B_tmp_1[v0, v1]
| B_tmp_2[v0, v1]
| B_tmp_3[v0, v1]
| B_tmp_4[v0, v1]
| B_tmp_5[v0, v1]
| B_tmp_6[v0, v1]
| B_tmp_7[v0, v1])
@T.prim_func
def interleave_weight_int8_1b(A: T.Buffer((N, QK), storage_dtype), B: T.Buffer((N, QK),
storage_dtype)):
B_tmp_1 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_2 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_3 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_4 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
B_tmp_5 = T.alloc_buffer((N, QK), storage_dtype, scope="local")
for ax0, ax1, ax2, ax3 in T.grid(N, QK, num_groups, elems_per_group):
with T.block("B_tmp"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
offset = v2 * elems_per_group + v3
shift = (offset % num_groups) * bits_stride + (offset // num_groups) * bits
B[v0, v1] = B[v0, v1] | (((A[v0, v1] >> (bits * offset)) & mask) << shift)
for ax0, ax1 in T.grid(N, QK):
with T.block("B"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
B_tmp_1[v0, v1] = B[v0, v1] & T.uint32(0xF0F00F0F)
B_tmp_2[v0, v1] = ((B[v0, v1] & T.uint32(0x000000F0)) >> 4) << 16
B_tmp_3[v0, v1] = ((B[v0, v1] & T.uint32(0x0000F000)) >> 12) << 24
B_tmp_4[v0, v1] = ((B[v0, v1] & T.uint32(0x000F0000)) >> 16) << 4
B_tmp_5[v0, v1] = ((B[v0, v1] & T.uint32(0x0F000000)) >> 24) << 12
B[v0, v1] = (
B_tmp_1[v0, v1]
| B_tmp_2[v0, v1]
| B_tmp_3[v0, v1]
| B_tmp_4[v0, v1]
| B_tmp_5[v0, v1])
if target_dtype == "float16" and bits == 2:
return interleave_weight_f16_2b
elif target_dtype == "float16" and bits == 1:
return interleave_weight_f16_1b
elif target_dtype == "int8" and bits == 1:
return interleave_weight_int8_1b
return interleave_weight
# fmt: on
def select_implementation(
M: int,
N: int,
datatype: Literal["float16", "int8"] = "float16",
storage_dtype: Literal["int8", "uint8", "int32", "uint32"] = "int32",
dequantize_bits: int = 4,
):
func = tir_interleave_weight(
N=M,
K=N,
bits=dequantize_bits,
target_dtype=datatype,
storage_dtype=storage_dtype,
)
mod = IRModule()
mod.update_func(GlobalVar("main"), func)
return mod
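# Usage sketch (illustrative, not part of the original module): build an IRModule that
# interleaves a 1024x1024 weight quantized to 4 bits for a float16 compute dtype, assuming
# the default int32 storage layout.
# mod = select_implementation(M=1024, N=1024, datatype="float16",
#                             storage_dtype="int32", dequantize_bits=4)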
|
BitBLAS/python/bitblas/ops/impl/lop3_permutate_impl.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/impl/lop3_permutate_impl.py",
"repo_id": "BitBLAS",
"token_count": 4120
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Optional, Tuple, Union, List, Dict
from tvm.ir import IRModule
from tvm.ir.transform import PassContext, module_pass
from tvm import relax
from tvm import tir
from enum import Enum
from tvm.ir import GlobalVar
from tvm.tir import IndexMap
from tvm.target import Target
from tvm.tir import IterVar
from tvm.tir.schedule.schedule import BlockRV
from tvm.relax import PyExprMutator
from tvm.relax.expr import Call
from bitblas.gpu.matmul_analysis import (
get_tensorized_func_and_tags,
get_propagate_map,
find_last_producer_from_buffer,
find_arg_idx_from_buffer_chain,
layout_propagate_chain,
)
from tvm.dlight.base import (
analysis,)
from dataclasses import dataclass
def get_reduction_blocks(sch, blocks) -> Optional[List[BlockRV]]:
# Get the main computation block
def is_reduction(block: BlockRV) -> bool:
block_stmt = sch.get(block)
iter_types = {iter_var.iter_type for iter_var in block_stmt.iter_vars}
return iter_types == {IterVar.CommReduce, IterVar.DataPar}
def is_spatial(block: BlockRV) -> bool:
block_stmt = sch.get(block)
iter_types = {iter_var.iter_type for iter_var in block_stmt.iter_vars}
return iter_types == {IterVar.DataPar}
# NOTE: We assume there is only one reduction block in the function
# all blocks are required to be spatial or reduction
if not all([is_reduction(block) or is_spatial(block) for block in blocks]):
return None
# There is only one reduction block
reduction_blocks = [block for block in blocks if is_reduction(block)]
if len(reduction_blocks) != 1:
return None
return reduction_blocks
class TransformKind(Enum):
NonTransform = 0
InterWarpTransform = 1
IntraWarpTransform = 2
def check_sm_version(arch: str) -> int:
sm_version = arch.replace("sm_", "")
return int(sm_version) if sm_version.isdigit() else -1
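# e.g. check_sm_version("sm_80") returns 80, while a non-numeric suffix falls back to -1.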
def get_in_out_dtypes(block: tir.Block) -> Tuple[str]:
"""
    Detect in/out data types for the given block based on the analysis of its read/write buffers.
"""
assert len(block.reads) > 0 and len(block.writes) > 0
in_dtype = block.reads[0].buffer.dtype
out_dtype = block.writes[0].buffer.dtype
return (in_dtype, out_dtype)
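# e.g. a matmul block that reads float16 A/B and accumulates into a float32 buffer
# would yield ("float16", "float32").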
@dataclass
class LayoutTransformHint:
"""
A dataclass to store the layout transformation hint.
"""
transform_level: TransformKind
inter_warp_layout: IndexMap
intra_warp_layout: IndexMap
apply_arg_idx: int
@module_pass(opt_level=0, name="InsertLayoutTransform")
class WeightOnlyLayoutPropagation:
def __init__(
self,
transform_level: Union[int, TransformKind] = TransformKind.InterWarpTransform,
target: Optional[Target] = None,
faster_conversion: bool = False,
) -> None:
if isinstance(transform_level, int):
transform_level = TransformKind(transform_level)
assert transform_level in [
TransformKind.NonTransform,
TransformKind.InterWarpTransform,
TransformKind.IntraWarpTransform,
]
# transform_level 1: only transform the inter-warp memory layout
# transform_level 2: transform the inter-warp memory layout and the intra-warp memory layout
self.transform_level = transform_level
self.target = Target.current() if target is None else target
# fast type conversion on nvidia gpu also requires weight permutation
self.faster_conversion = faster_conversion
# layout transform info to sync the layout in both graph and tir
self.layout_transform_hints: Dict[str, List[LayoutTransformHint]] = {}
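    # Usage sketch (assumption, not from the original source): an instance of this module pass,
    # e.g. WeightOnlyLayoutPropagation(transform_level=TransformKind.IntraWarpTransform),
    # is meant to sit in a relax pass pipeline so weight layouts are rewritten before lowering.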
def detect_propagate_matmul(self, func: tir.PrimFunc, target: Target):
_, tags = get_tensorized_func_and_tags(func, target, skip_normalize=True, allow_gemv=True)
if tags is None:
return False, None
return True, tags["intrin_info"]
def transform_matmul(self, g_var: GlobalVar, func: tir.PrimFunc, intrin_info):
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group,)
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None or len(reduction_blocks) != 1:
return False
(main_block,) = reduction_blocks
intrin_group = get_mma_intrin_group(
load_scope="shared",
store_scope="shared",
a_dtype=intrin_info["in_dtype"],
b_dtype=intrin_info["in_dtype"],
out_dtype=intrin_info["out_dtype"],
trans_a=False,
trans_b=intrin_info["trans_b"],
)
_, inter_j, inter_k = intrin_group["micro_kernel"]
# weight only propagation
target_scope = ("read", 1)
weight_buffer = sch.get(main_block).reads[1].buffer
        # check whether the weight buffer has a dynamic symbol
def check_dynamic_symbol(buffer):
return any([isinstance(axis, tir.Var) for axis in buffer.shape])
if check_dynamic_symbol(weight_buffer):
print("[BitBLAS] Weight buffer has dynamic symbol, skip weight propagation.")
return False
transformed_block = find_last_producer_from_buffer(sch, main_block, weight_buffer)
if transformed_block is None:
return False
if transformed_block != main_block:
target_scope = ("read", 0)
reindex_block = sch.cache_read(transformed_block, target_scope[1], "global")
# create inter-warp memory layout index map
inter_warp_layout = IndexMap.from_func(
lambda i, j: (i // inter_j, j // inter_k, i % inter_j, j % inter_k))
inter_warp_layout = layout_propagate_chain(
sch,
main_block,
sch.get(main_block).reads[1].buffer,
reindex_block,
inter_warp_layout,
)
sch.transform_layout(
reindex_block,
("read", 0),
lambda i, j: inter_warp_layout.map_indices([i, j]),
)
arg_idx = find_arg_idx_from_buffer_chain(sch, reindex_block,
sch.get(reindex_block).reads[0].buffer)
intra_warp_layout = None
if self.transform_level.value >= TransformKind.IntraWarpTransform.value:
intra_warp_layout, _ = get_propagate_map(intrin_info["trans_b"])
intra_warp_layout = layout_propagate_chain(
sch,
main_block,
sch.get(main_block).reads[1].buffer,
reindex_block,
intra_warp_layout,
)
sch.transform_layout(
reindex_block,
("read", 0),
lambda i, j, ii, jj: (
i,
j,
*intra_warp_layout.map_indices([ii, jj]),
),
)
self.layout_transform_hints[g_var] = [
LayoutTransformHint(
transform_level=self.transform_level,
inter_warp_layout=inter_warp_layout,
intra_warp_layout=intra_warp_layout,
apply_arg_idx=arg_idx,
)
]
return sch.mod["main"]
def transform_module( # pylint: disable=missing-function-docstring
self,
mod: IRModule,
_: PassContext,
) -> IRModule:
if self.target.kind.name != "cuda":
# currently weight propagation only support nvidia gpus
return mod
propagate_candidates = {}
propagated_funcs = {} # some funcs may not be able to transform
candidates_intrin_info = {}
decoded_funcs = {}
for g_var, func in mod.functions_items():
if not isinstance(func, tir.PrimFunc):
continue
if g_var.name_hint != "main":
# Note: this can be applied to any function which can be transformed to matmul (e.g., conv2d)
# for mlc we only consider matmul
# detect the pattern
is_matmul, intrin_info = self.detect_propagate_matmul(func, self.target)
if (func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys()):
# currently we only support tensorize propagation
continue
if is_matmul:
if "dequantize_info" in func.attrs:
decoded_funcs[g_var] = func
if self.transform_level != TransformKind.NonTransform:
# lift tags to the function as it has intrinsic information that can be reused.
propagate_candidates[g_var] = func
candidates_intrin_info[g_var] = intrin_info
for g_var, func in propagate_candidates.items():
updated_func = self.transform_matmul(g_var, func, candidates_intrin_info[g_var])
if updated_func:
updated_func = updated_func.with_attrs({
"transform_kind": self.transform_level.value,
"weight_transform_kind": True,
})
propagated_funcs[g_var] = updated_func
mod[g_var] = updated_func
@relax.expr_functor.mutator
class TensorCoreLayoutMutator(PyExprMutator):
"""Mutator that performs transformation."""
def __init__(
self,
transform_level: TransformKind = TransformKind.NonTransform,
layout_transform_hints: Optional[Dict[str, List[LayoutTransformHint]]] = None,
):
if layout_transform_hints is None:
layout_transform_hints = {}
super().__init__()
self.transform_level = transform_level
self.layout_transform_hints = layout_transform_hints
def tc_layout_transform(self, call_node: Call) -> Call:
if self.transform_level == TransformKind.NonTransform:
return super().visit_call_(call_node)
g_var = call_node.args[0]
if g_var not in propagated_funcs:
return super().visit_call_(call_node)
args = list(call_node.args[1])
# assume we only have weight propagation currently
(weight_layout_hint,) = self.layout_transform_hints[g_var]
weight = args[weight_layout_hint.apply_arg_idx]
weight = self.builder_.emit(
relax.op.layout_transform(
weight,
index_map=lambda i, j: weight_layout_hint.inter_warp_layout.map_indices(
[i, j]),
))
if self.transform_level.value >= TransformKind.IntraWarpTransform.value:
weight = self.builder_.emit(
relax.op.layout_transform(
weight,
index_map=lambda i, j, ii, jj: (
i,
j,
*weight_layout_hint.intra_warp_layout.map_indices([ii, jj]),
),
))
call_node = self.builder_.emit(
relax.call_tir(
g_var,
args[:weight_layout_hint.apply_arg_idx] + [weight] +
args[weight_layout_hint.apply_arg_idx + 1:],
out_sinfo=call_node.struct_info,
))
return call_node
def visit_call_(self, call_node: Call):
return self.tc_layout_transform(call_node)
def transform(
self,
mod: IRModule,
):
for gv, func in mod.functions_items():
if isinstance(func, relax.Function):
updated_func = self.visit_expr(func)
self.builder_.update_func(gv, updated_func)
new_mod = self.builder_.get()
new_mod = new_mod.with_attrs(mod.attrs) if mod.attrs else new_mod
for gv, func in new_mod.functions_items():
mod.update_func(gv, func)
return mod
mod = TensorCoreLayoutMutator(
transform_level=self.transform_level,
layout_transform_hints=self.layout_transform_hints,
).transform(mod)
@relax.expr_functor.mutator
class FastTypeConversionLayoutMutator(PyExprMutator):
"""Mutator that performs transformation."""
def __init__(self, faster_conversion: bool = False):
super().__init__()
self.faster_conversion = faster_conversion
def lop3_layout_transform(self, call_node: Call) -> Call:
if not self.faster_conversion:
return super().visit_call_(call_node)
from bitblas.ops.impl import tir_interleave_weight
g_var = call_node.args[0]
if g_var not in decoded_funcs:
return super().visit_call_(call_node)
args = list(call_node.args[1])
func = decoded_funcs[g_var]
if "dequantize_info" not in func.attrs:
return super().visit_call_(call_node)
dequantize_info = dict(func.attrs["dequantize_info"])
assert len(dequantize_info) == 1
(weight_dequantize_info,) = dequantize_info.values()
sch = tir.Schedule(func)
dequantize_block = sch.get_block(weight_dequantize_info["decode_block"])
                # the weight is the first read buffer when the source format is "int"/"uint";
                # for lookup-table formats such as "nf" it is the second read buffer
source_format = weight_dequantize_info["source_format"]["format"]
source_bits = weight_dequantize_info["source_format"]["bits"]
target_dtype = weight_dequantize_info["target_format"]
if source_format in ["int", "uint"]:
weight_buffer = sch.get(dequantize_block).reads[0].buffer
elif source_format in ["nf"]:
weight_buffer = sch.get(dequantize_block).reads[1].buffer
else:
raise ValueError(f"Unsupported source format {source_format}")
# update func with dequantize_info
dequantize_info["fast_decoding"] = True
self.builder_.update_func(g_var,
func.with_attrs({"dequantize_info": dequantize_info}))
weight_idx = find_arg_idx_from_buffer_chain(sch, dequantize_block, weight_buffer)
weight = args[weight_idx]
weight_shape = weight_buffer.shape
# reshape the weight shape to 2d
reshape_weight = self.builder_.emit(
relax.op.reshape(weight, (-1, weight_shape[-1])))
# register g_var to the func
lop3_interleave_func = tir_interleave_weight(
N=reshape_weight.struct_info.shape[0],
QK=reshape_weight.struct_info.shape[1],
bits=source_bits,
target_dtype=target_dtype,
storage_dtype=reshape_weight.struct_info.dtype,
)
interleave_gvar = self.builder_.add_func(
lop3_interleave_func.without_attr("global_symbol"),
"tir_interleave_weight",
)
lop3_interleave_weight = self.builder_.emit(
relax.call_tir(
interleave_gvar,
[reshape_weight],
out_sinfo=reshape_weight.struct_info,
),)
reshape_weight = self.builder_.emit(
relax.op.reshape(lop3_interleave_weight, weight_shape))
call_node = self.builder_.emit(
relax.call_tir(
g_var,
args[:weight_idx] + [reshape_weight] + args[weight_idx + 1:],
out_sinfo=call_node.struct_info,
),)
return call_node
def visit_call_(self, call_node: Call):
return self.lop3_layout_transform(call_node)
def transform(
self,
mod: IRModule,
):
for gv, func in mod.functions_items():
if isinstance(func, relax.Function):
updated_func = self.visit_expr(func)
self.builder_.update_func(gv, updated_func)
new_mod = self.builder_.get()
new_mod = new_mod.with_attrs(mod.attrs) if mod.attrs else new_mod
for gv, func in new_mod.functions_items():
mod.update_func(gv, func)
return mod
mod = FastTypeConversionLayoutMutator(
faster_conversion=self.faster_conversion).transform(mod)
mod = relax.transform.LegalizeOps()(mod)
return mod
|
BitBLAS/python/bitblas/relax/transform/weight_only_propagate.py/0
|
{
"file_path": "BitBLAS/python/bitblas/relax/transform/weight_only_propagate.py",
"repo_id": "BitBLAS",
"token_count": 8746
}
| 152 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <gtest/gtest.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "fast_decoding.hpp"
#define cudaCheckLastError(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#define REGISTER_GLOBAL_DEVICE_INVOKER(kernel, function) \
template <typename... Args> \
__global__ void kernel(Args... args) \
{ \
function(args...); \
}
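// Illustrative note: each REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_x, decode_x) below
// expands to a thin __global__ kernel that forwards its arguments to the device-side decoder,
// so every decoding routine can be launched and validated in isolation from the host test code.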
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4s_to_f16, decode_i4s_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_f16, decode_i4u_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2s_to_f16, decode_i2s_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_f16, decode_i2u_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1s_to_f16, decode_i1s_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1u_to_f16, decode_i1u_to_f16)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4s_to_f16_scale, decode_i4s_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_f16_scale, decode_i4u_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2s_to_f16_scale, decode_i2s_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_f16_scale, decode_i2u_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1s_to_f16_scale, decode_i1s_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1u_to_f16_scale, decode_i1u_to_f16_scale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_f16_scale_zeros_original, decode_i4u_to_f16_scale_zeros_original)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_f16_scale_zeros_original, decode_i2u_to_f16_scale_zeros_original)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1u_to_f16_scale_zeros_original, decode_i1u_to_f16_scale_zeros_original)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_f16_scale_zeros_rescale, decode_i4u_to_f16_scale_zeros_rescale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_f16_scale_zeros_rescale, decode_i2u_to_f16_scale_zeros_rescale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1u_to_f16_scale_zeros_rescale, decode_i1u_to_f16_scale_zeros_rescale)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_f16_scale_zeros_quantized, decode_i4u_to_f16_scale_zeros_quantized)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_f16_scale_zeros_quantized, decode_i2u_to_f16_scale_zeros_quantized)
TEST(DecodeTest, DecodeInt4ToFloat16)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // initialize N quantized input values
    int8_t in_data[N] = {
        0,
    };
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToFloat16)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // initialize N quantized input values
    int8_t in_data[N] = {
        0,
    };
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeInt2ToFloat16)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // initialize N quantized input values
    int8_t in_data[N] = {
        0,
    };
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i2s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2ToFloat16)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // initialize N quantized input values
    int8_t in_data[N] = {
        0,
    };
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i2u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeInt1ToFloat16)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // initialize N quantized input values
    int8_t in_data[N] = {
        0,
    };
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i1s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4);
kernelWrapper_i1s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
    kernelWrapper_i1s_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(2 * in_data[i] - 1, int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt1ToFloat16)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i1u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4);
kernelWrapper_i1u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
    kernelWrapper_i1u_to_f16<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeInt4ToFloat16WithScaling)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(0.314)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToFloat16WithScaling)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeInt2ToFloat16WithScaling)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(0.314)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
kernelWrapper_i2s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2ToFloat16WithScaling)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.0)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
kernelWrapper_i2u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeInt1ToFloat16WithScaling)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(0.314)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
kernelWrapper_i1s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4, scale_gpu);
kernelWrapper_i1s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu);
kernelWrapper_i1s_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToFloat16WithScalingWithZerosOriginal)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(zero_point)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits));
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR((in_data[i] - float(zeros[0])) * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2ToFloat16WithScalingWithZerosOriginal)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(zero_point)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits));
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
kernelWrapper_i2u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR((in_data[i] - float(zeros[0])) * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt1ToFloat16WithScalingWithZerosOriginal)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(zero_point)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits));
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_original<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR((in_data[i] - float(zeros[0])) * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt1ToFloat16WithScaling)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.0)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu);
kernelWrapper_i1u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4, scale_gpu);
kernelWrapper_i1u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu);
kernelWrapper_i1u_to_f16_scale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4, scale_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToFloat16WithScalingWithZerosRescale)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(0.5)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]) - float(zeros[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2ToFloat16WithScalingWithZerosRescale)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(0.5)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
kernelWrapper_i2u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]) - float(zeros[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt1ToFloat16WithScalingWithZerosRescale)
{
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
    // create N int8_t values
int8_t in_data[N] = {
0,
};
half scale[1] = {__float2half(1.2)};
half zeros[1] = {__float2half(0.5)};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu, *zeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&zeros_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(zeros_gpu, zeros, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 4, decoded_gpu + N / 4, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu, zeros_gpu);
kernelWrapper_i1u_to_f16_scale_zeros_rescale<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2 + QN / 4, decoded_gpu + N / 2 + N / 4, scale_gpu, zeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(in_data[i] * float(scale[0]) - float(zeros[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToFloat16WithScalingWithZerosQuantized)
{
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
    // create N int8_t values
int8_t in_data[N] = {
0};
half scale[1] = {__float2half(1.2)};
uint qzeros[1] = {(1 << (nbits - 1)) - 1};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits));
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
uint *qzeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&qzeros_gpu, 1 * sizeof(uint)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(qzeros_gpu, qzeros, 1 * sizeof(uint), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_f16_scale_zeros_quantized<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, qzeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(((int)in_data[i] - (int)qzeros[0]) * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2toFloat16WithScalingWithZerosQuantized)
{
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
    // create N int8_t values
int8_t in_data[N] = {
0};
half scale[1] = {__float2half(1.2)};
uint qzeros[1] = {(1 << (nbits - 1)) - 1};
    // seed the random number generator
srand(0);
// random initializations with nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits));
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_fp16(ins, interleaved, nbits, QN * sizeof(int8_t), false);
half *decoded = new half[N];
int8_t *ins_gpu;
half *decoded_gpu, *scale_gpu;
uint *qzeros_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&scale_gpu, 1 * sizeof(half)));
cudaCheckLastError(cudaMalloc((void **)&qzeros_gpu, 1 * sizeof(uint)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(scale_gpu, scale, 1 * sizeof(half), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(qzeros_gpu, qzeros, 1 * sizeof(uint), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_f16_scale_zeros_quantized<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu, scale_gpu, qzeros_gpu);
kernelWrapper_i2u_to_f16_scale_zeros_quantized<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2, scale_gpu, qzeros_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(half), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_NEAR(((int)in_data[i] - (int)qzeros[0]) * float(scale[0]), float(decoded[i]), 1e-2);
}
    delete[] ins;
    delete[] interleaved;
    delete[] decoded;
}
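// Summary of the dequantization conventions exercised by the tests above, as implied by the
// EXPECT_* checks (a reading aid only; kernel internals are not restated here):
//   scale only:        fp16 = q * scale
//   zeros "original":  fp16 = (q - zeros) * scale      // zeros stored as fp16
//   zeros "rescale":   fp16 = q * scale - zeros        // zeros stored as fp16, with the scale already folded in
//   zeros "quantized": fp16 = (q - qzeros) * scale     // qzeros stored as an unsigned integer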
|
BitBLAS/testing/cpp/lop3_type_conversion/lowprecision_to_float16.cu/0
|
{
"file_path": "BitBLAS/testing/cpp/lop3_type_conversion/lowprecision_to_float16.cu",
"repo_id": "BitBLAS",
"token_count": 20778
}
| 153 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import bitblas
import numpy as np
from bitblas.quantization.utils import general_compress, interleave_weight
from bitblas.ops.matmul import MatmulWeightOnlyDequantize
M = 1
N = 4096
K = 1024
bitblas_matmul = MatmulWeightOnlyDequantize(
M=M,
N=N,
K=K,
in_dtype="float16",
out_dtype="float16",
accum_dtype="float16",
propagate_b=False,
bit=4,
storage_dtype="uint8",
source_format="int",
with_scaling=False,
group_size=128,
fast_decoding=False,
with_bias=False,
)
torch_arrs = []
torch_arrs.append(torch.randint(0, 10, (M, K), dtype=torch.float16, device="cuda"))
torch_arrs.append(torch.randint(0, 7, (N, K), dtype=torch.float16, device="cuda"))
torch_arrs.append(torch.zeros((M, K), dtype=torch.float16, device="cuda"))
print("torch: {}".format(torch_arrs[-1]))
|
BitBLAS/testing/python/weight_only/correctness/test_fp16xint4_correctness.py/0
|
{
"file_path": "BitBLAS/testing/python/weight_only/correctness/test_fp16xint4_correctness.py",
"repo_id": "BitBLAS",
"token_count": 446
}
| 154 |
date ; hostname ; pwd
EXP_NODES=1
EXP_IS=384
EXP_PGB=8
EXP_PGEB=32
EXP_LR=5e-6
EXP_BS=64
EXP_ME=10
EXP_WS=0.1
EXP_WD=0.008
EXP_LMH=10
EXP_LMC=5
EXP_THL=2
EXP_HHS=1.5
EXP_LP=BridgeTower_pt_base.ckpt
EXP_RGM=blip_randaug_wc
EXP_CDR=0.1
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
PREFIX_NAME="ftfpt"
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_NODES, $EXP_IS, $EXP_PGB, $EXP_PGEB, $EXP_LR, $EXP_BS, $EXP_ME, $EXP_WS, $EXP_WD, $EXP_LMH, $EXP_LMC, $EXP_THL, $EXP_HHS, $EXP_RGM, $EXP_CDR
TIME=$(date "+%Y%m%d%H%M")
RUN_NAME=""$PREFIX_NAME"_"$EXP_IS"_"$EXP_PGB"_"$EXP_PGEB"_"$EXP_LR"_"$EXP_BS"_"$EXP_ME"_"$EXP_WS"_"$EXP_WD"_"$EXP_LMH"_"$EXP_LMC"_"$EXP_THL"_"$EXP_HHS"_"$EXP_RGM"_"$EXP_CDR"_"$TIME""
echo $RUN_NAME
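# With the values above, RUN_NAME expands to something like the following (timestamp is illustrative):
# ftfpt_384_8_32_5e-6_64_10_0.1_0.008_10_5_2_1.5_blip_randaug_wc_0.1_202401011200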
python run.py with run_name=$RUN_NAME task_finetune_nlvr2_clip_bert bt clip16 text_roberta $EXP_RGM num_gpus=8 num_nodes=$EXP_NODES load_path=~/BT/best_checkpoints/$EXP_LP image_size=$EXP_IS per_gpu_batchsize=$EXP_PGB per_gpu_eval_batchsize=$EXP_PGEB learning_rate=$EXP_LR batch_size=$EXP_BS max_epoch=$EXP_ME warmup_steps=$EXP_WS weight_decay=$EXP_WD lr_mult_head=$EXP_LMH lr_mult_cross_modal=$EXP_LMC task_head_layers=$EXP_THL head_hidden_scale=$EXP_HHS nlvr2_drop_rate=$EXP_CDR
date
|
BridgeTower/scripts/ftfpt_base_nlvr2.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_base_nlvr2.sh",
"repo_id": "BridgeTower",
"token_count": 643
}
| 155 |
from ..datasets import CocoCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class CocoCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CocoCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return CocoCaptionKarpathyDataset
@property
def dataset_name(self):
return "coco"
|
BridgeTower/src/datamodules/coco_caption_karpathy_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/coco_caption_karpathy_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 189
}
| 156 |
from glob import glob
from .base_dataset import BaseDataset
import io
from PIL import Image
class SBUCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = [f"sbu_{i}" for i in range(9)]
elif split == "val":
names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
|
BridgeTower/src/datasets/sbu_caption_dataset.py/0
|
{
"file_path": "BridgeTower/src/datasets/sbu_caption_dataset.py",
"repo_id": "BridgeTower",
"token_count": 253
}
| 157 |
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import logging
import math
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from .swin_helpers import swin_build_model_with_cfg
from timm.models.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import checkpoint_filter_fn, _init_vit_weights
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
'swin_base_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_base_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
),
'swin_large_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_large_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
),
'swin_small_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
),
'swin_tiny_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
),
'swin_base_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_base_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
}
def window_partition(x, window_size: int):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size: int, H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
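# Minimal shape sketch for the two helpers above (illustrative values only): with B=2, H=W=56,
# C=96 and window_size=7,
#   x = torch.randn(2, 56, 56, 96)
#   windows = window_partition(x, 7)         # -> (2 * 8 * 8, 7, 7, 96)
#   y = window_reverse(windows, 7, 56, 56)   # -> (2, 56, 56, 96), reconstructing x exactly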
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
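# Shape sketch for PatchMerging (illustrative): with input_resolution=(56, 56) and dim=96, an
# input of shape (B, 56*56, 96) is regrouped into 2x2 spatial neighborhoods (4*96 channels) and
# linearly reduced to (B, 28*28, 192); the spatial resolution halves while the channel count doubles.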
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, weight_init='', **kwargs):
super().__init__()
        window_size = int(img_size / 32)  # override the passed-in window_size so it scales with the input image size
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
self.patch_grid = self.patch_embed.grid_size
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
else:
self.absolute_pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
layers = []
for i_layer in range(self.num_layers):
layers += [BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
if weight_init.startswith('jax'):
for n, m in self.named_modules():
_init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
else:
self.apply(_init_vit_weights)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.absolute_pos_embed is not None:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
x = self.layers(x)
x = self.norm(x) # B L C
return x
def forward(self, x):
x = self.forward_features(x)
return x
def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
img_size = kwargs['config']['image_size']
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = swin_build_model_with_cfg(
SwinTransformer, variant, pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_strict=False,
**kwargs)
return model
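# Illustrative usage of the factory above (assumption: a BridgeTower-style config dict is passed;
# only config['image_size'] is read in this function and the remaining kwargs are forwarded):
#   model = swin_base_patch4_window7_224(pretrained=True, config={"image_size": 384})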
@register_model
def swin_base_patch4_window12_384(pretrained=False, **kwargs):
""" Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384(pretrained=False, **kwargs):
""" Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_small_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-T @ 224x224, trained ImageNet-1k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs):
""" Swin-B @ 384x384, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs):
""" Swin-B @ 224x224, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs):
""" Swin-L @ 384x384, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs):
""" Swin-L @ 224x224, trained ImageNet-22k
"""
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
|
BridgeTower/src/modules/swin_transformer.py/0
|
{
"file_path": "BridgeTower/src/modules/swin_transformer.py",
"repo_id": "BridgeTower",
"token_count": 12225
}
| 158 |
import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions):
name = path.split("/")[-1]
iid = int(name[:-4])
with open(path, "rb") as fp:
binary = fp.read()
cdicts = iid2captions[iid]
captions = [c["phrase"] for c in cdicts]
widths = [c["width"] for c in cdicts]
heights = [c["height"] for c in cdicts]
xs = [c["x"] for c in cdicts]
ys = [c["y"] for c in cdicts]
return [
binary,
captions,
widths,
heights,
xs,
ys,
str(iid),
]
def make_arrow(root, dataset_root):
with open(f"{root}/annotations/region_descriptions.json", "r") as fp:
captions = json.load(fp)
iid2captions = defaultdict(list)
for cap in tqdm(captions):
cap = cap["regions"]
for c in cap:
iid2captions[c["image_id"]].append(c)
paths = list(glob(f"{root}/images/VG_100K/*.jpg")) + list(
glob(f"{root}/images/VG_100K_2/*.jpg")
)
random.shuffle(paths)
caption_paths = [
path for path in paths if int(path.split("/")[-1][:-4]) in iid2captions
]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
bs = [path2rest(path, iid2captions) for path in tqdm(caption_paths)]
dataframe = pd.DataFrame(
bs, columns=["image", "caption", "width", "height", "x", "y", "image_id"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/vg.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
make_arrow('~/BT/dataset/vg', '~/BT/dataset/pre-train')
|
BridgeTower/src/utils/write_vg.py/0
|
{
"file_path": "BridgeTower/src/utils/write_vg.py",
"repo_id": "BridgeTower",
"token_count": 923
}
| 159 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d
import torch.nn.utils.spectral_norm as spectral_norm
def get_nonspade_norm_layer(opt, norm_type="instance"):
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, "out_channels"):
return getattr(layer, "out_channels")
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith("spectral"):
layer = spectral_norm(layer)
subnorm_type = norm_type[len("spectral") :]
if subnorm_type == "none" or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, "bias", None) is not None:
delattr(layer, "bias")
layer.register_parameter("bias", None)
if subnorm_type == "batch":
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == "sync_batch":
norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == "instance":
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
else:
raise ValueError("normalization layer %s is not recognized" % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc, opt):
super().__init__()
assert config_text.startswith("spade")
parsed = re.search("spade(\D+)(\d)x\d", config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
self.opt = opt
if param_free_norm_type == "instance":
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == "syncbatch":
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == "batch":
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError("%s is not a recognized param-free norm type in SPADE" % param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
if self.opt.no_parsing_map:
self.mlp_shared = nn.Sequential(nn.Conv2d(3, nhidden, kernel_size=ks, padding=pw), nn.ReLU())
else:
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc + 3, nhidden, kernel_size=ks, padding=pw), nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap, degraded_image):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode="nearest")
degraded_face = F.interpolate(degraded_image, size=x.size()[2:], mode="bilinear")
if self.opt.no_parsing_map:
actv = self.mlp_shared(degraded_face)
else:
actv = self.mlp_shared(torch.cat((segmap, degraded_face), dim=1))
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/normalization.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/normalization.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1711
}
| 160 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.resize_or_crop == 'resize_and_crop':
new_h = new_w = opt.loadSize
if opt.resize_or_crop == 'scale_width_and_crop': # we scale the shorter side into 256
if w<h:
new_w = opt.loadSize
new_h = opt.loadSize * h // w
else:
new_h=opt.loadSize
new_w = opt.loadSize * w // h
if opt.resize_or_crop=='crop_only':
pass
x = random.randint(0, np.maximum(0, new_w - opt.fineSize))
y = random.randint(0, np.maximum(0, new_h - opt.fineSize))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params, method=Image.BICUBIC, normalize=True):
transform_list = []
if 'resize' in opt.resize_or_crop:
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Scale(osize, method))
elif 'scale_width' in opt.resize_or_crop:
# transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method))) ## Here , We want the shorter side to match 256, and Scale will finish it.
transform_list.append(transforms.Scale(256,method))
if 'crop' in opt.resize_or_crop:
if opt.isTrain:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
else:
if opt.test_random_crop:
transform_list.append(transforms.RandomCrop(opt.fineSize))
else:
transform_list.append(transforms.CenterCrop(opt.fineSize))
## when testing, for ablation study, choose center_crop directly.
if opt.resize_or_crop == 'none':
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def normalize():
return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
|
Bringing-Old-Photos-Back-to-Life/Global/data/base_dataset.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/data/base_dataset.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1626
}
| 161 |
import tempfile
from pathlib import Path
import argparse
import shutil
import os
import glob
import cv2
import cog
from run import run_cmd
class Predictor(cog.Predictor):
def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_folder", type=str, default="input/cog_temp", help="Test images"
)
parser.add_argument(
"--output_folder",
type=str,
default="output",
help="Restored images, please use the absolute path",
)
parser.add_argument("--GPU", type=str, default="0", help="0,1,2")
parser.add_argument(
"--checkpoint_name",
type=str,
default="Setting_9_epoch_100",
help="choose which checkpoint",
)
self.opts = parser.parse_args("")
self.basepath = os.getcwd()
self.opts.input_folder = os.path.join(self.basepath, self.opts.input_folder)
self.opts.output_folder = os.path.join(self.basepath, self.opts.output_folder)
os.makedirs(self.opts.input_folder, exist_ok=True)
os.makedirs(self.opts.output_folder, exist_ok=True)
@cog.input("image", type=Path, help="input image")
@cog.input(
"HR",
type=bool,
default=False,
help="whether the input image is high-resolution",
)
@cog.input(
"with_scratch",
type=bool,
default=False,
help="whether the input image is scratched",
)
def predict(self, image, HR=False, with_scratch=False):
try:
os.chdir(self.basepath)
input_path = os.path.join(self.opts.input_folder, os.path.basename(image))
shutil.copy(str(image), input_path)
gpu1 = self.opts.GPU
## Stage 1: Overall Quality Improve
print("Running Stage 1: Overall restoration")
os.chdir("./Global")
stage_1_input_dir = self.opts.input_folder
stage_1_output_dir = os.path.join(
self.opts.output_folder, "stage_1_restore_output"
)
os.makedirs(stage_1_output_dir, exist_ok=True)
if not with_scratch:
stage_1_command = (
"python test.py --test_mode Full --Quality_restore --test_input "
+ stage_1_input_dir
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1
)
run_cmd(stage_1_command)
else:
mask_dir = os.path.join(stage_1_output_dir, "masks")
new_input = os.path.join(mask_dir, "input")
new_mask = os.path.join(mask_dir, "mask")
stage_1_command_1 = (
"python detection.py --test_path "
+ stage_1_input_dir
+ " --output_dir "
+ mask_dir
+ " --input_size full_size"
+ " --GPU "
+ gpu1
)
if HR:
HR_suffix = " --HR"
else:
HR_suffix = ""
stage_1_command_2 = (
"python test.py --Scratch_and_Quality_restore --test_input "
+ new_input
+ " --test_mask "
+ new_mask
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1
+ HR_suffix
)
run_cmd(stage_1_command_1)
run_cmd(stage_1_command_2)
## Solve the case when there is no face in the old photo
stage_1_results = os.path.join(stage_1_output_dir, "restored_image")
stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output")
os.makedirs(stage_4_output_dir, exist_ok=True)
for x in os.listdir(stage_1_results):
img_dir = os.path.join(stage_1_results, x)
shutil.copy(img_dir, stage_4_output_dir)
print("Finish Stage 1 ...")
print("\n")
## Stage 2: Face Detection
print("Running Stage 2: Face Detection")
os.chdir(".././Face_Detection")
stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image")
stage_2_output_dir = os.path.join(
self.opts.output_folder, "stage_2_detection_output"
)
os.makedirs(stage_2_output_dir, exist_ok=True)
stage_2_command = (
"python detect_all_dlib_HR.py --url "
+ stage_2_input_dir
+ " --save_url "
+ stage_2_output_dir
)
run_cmd(stage_2_command)
print("Finish Stage 2 ...")
print("\n")
## Stage 3: Face Restore
print("Running Stage 3: Face Enhancement")
os.chdir(".././Face_Enhancement")
stage_3_input_mask = "./"
stage_3_input_face = stage_2_output_dir
stage_3_output_dir = os.path.join(
self.opts.output_folder, "stage_3_face_output"
)
os.makedirs(stage_3_output_dir, exist_ok=True)
self.opts.checkpoint_name = "FaceSR_512"
stage_3_command = (
"python test_face.py --old_face_folder "
+ stage_3_input_face
+ " --old_face_label_folder "
+ stage_3_input_mask
+ " --tensorboard_log --name "
+ self.opts.checkpoint_name
+ " --gpu_ids "
+ gpu1
+ " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir "
+ stage_3_output_dir
+ " --no_parsing_map"
)
run_cmd(stage_3_command)
print("Finish Stage 3 ...")
print("\n")
## Stage 4: Warp back
print("Running Stage 4: Blending")
os.chdir(".././Face_Detection")
stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image")
stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img")
stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output")
os.makedirs(stage_4_output_dir, exist_ok=True)
stage_4_command = (
"python align_warp_back_multiple_dlib_HR.py --origin_url "
+ stage_4_input_image_dir
+ " --replace_url "
+ stage_4_input_face_dir
+ " --save_url "
+ stage_4_output_dir
)
run_cmd(stage_4_command)
print("Finish Stage 4 ...")
print("\n")
print("All the processing is done. Please check the results.")
final_output = os.listdir(os.path.join(self.opts.output_folder, "final_output"))[0]
image_restore = cv2.imread(os.path.join(self.opts.output_folder, "final_output", final_output))
out_path = Path(tempfile.mkdtemp()) / "out.png"
cv2.imwrite(str(out_path), image_restore)
finally:
clean_folder(self.opts.input_folder)
clean_folder(self.opts.output_folder)
return out_path
def clean_folder(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(f"Failed to delete {file_path}. Reason:{e}")
|
Bringing-Old-Photos-Back-to-Life/predict.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/predict.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 4489
}
| 162 |
import torch
import torch.nn as nn
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
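# Illustrative sketch (not part of the original file; values are made up):
#
#   x = torch.arange(8, dtype=torch.float32).reshape(4, 2)   # batch of 4 examples
#   lam = torch.tensor([0.7, 0.3, 0.6, 0.4])                  # one lambda per example
#   mixed = do_mixup(x, lam)                                   # shape (2, 2)
#   # mixed[0] == 0.7 * x[0] + 0.3 * x[1]
#   # mixed[1] == 0.6 * x[2] + 0.4 * x[3]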
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
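# Illustrative sketch (not part of the original file): with a (1, 3, 2) input and
# ratio=2, each of the 3 frames is repeated twice in place, giving a (1, 6, 2)
# output ordered t0, t0, t1, t1, t2, t2.
#
#   x = torch.randn(1, 3, 2)
#   y = interpolate(x, ratio=2)   # y.shape == (1, 6, 2)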
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
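# Illustrative sketch (not part of the original file): padding a 3-frame output
# up to 5 frames repeats the last frame twice.
#
#   fw = torch.randn(1, 3, 2)
#   padded = pad_framewise_output(fw, frames_num=5)   # padded.shape == (1, 5, 2)
#   # padded[:, 3] and padded[:, 4] both equal fw[:, 2]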
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_flops(model, audio_length):
"""Count flops. Code modified from others' implementation.
"""
multiply_adds = True
list_conv2d=[]
def conv2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv2d.append(flops)
list_conv1d=[]
def conv1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_conv1d.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement() * 2)
list_pooling2d=[]
def pooling2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling2d.append(flops)
list_pooling1d=[]
def pooling1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0]
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
        list_pooling1d.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, nn.Conv2d):
net.register_forward_hook(conv2d_hook)
elif isinstance(net, nn.Conv1d):
net.register_forward_hook(conv1d_hook)
elif isinstance(net, nn.Linear):
net.register_forward_hook(linear_hook)
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
net.register_forward_hook(bn_hook)
elif isinstance(net, nn.ReLU):
net.register_forward_hook(relu_hook)
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
net.register_forward_hook(pooling2d_hook)
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
net.register_forward_hook(pooling1d_hook)
else:
print('Warning: flop of module {} is not counted!'.format(net))
return
for c in childrens:
foo(c)
# Register hook
foo(model)
    device = next(model.parameters()).device
input = torch.rand(1, audio_length).to(device)
out = model(input)
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
return total_flops
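# Illustrative usage (not part of the original file; the model and input length
# are hypothetical):
#
#   flops = count_flops(model, audio_length=32000)
#
# This registers forward hooks on the supported layer types, runs one dummy
# forward pass on a (1, audio_length) tensor and sums the per-layer estimates.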
|
CLAP/msclap/models/pytorch_utils.py/0
|
{
"file_path": "CLAP/msclap/models/pytorch_utils.py",
"repo_id": "CLAP",
"token_count": 2744
}
| 163 |
.. fairseq documentation master file, created by
sphinx-quickstart on Fri Aug 17 21:45:30 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
:github_url: https://github.com/pytorch/fairseq
fairseq documentation
=====================
Fairseq is a sequence modeling toolkit written in `PyTorch
<http://pytorch.org/>`_ that allows researchers and developers to
train custom models for translation, summarization, language modeling and other
text generation tasks.
.. toctree::
:maxdepth: 1
:caption: Getting Started
getting_started
command_line_tools
.. toctree::
:maxdepth: 1
:caption: Extending Fairseq
overview
tutorial_simple_lstm
tutorial_classifying_names
.. toctree::
:maxdepth: 2
:caption: Library Reference
tasks
models
criterions
optim
lr_scheduler
data
modules
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`
|
COCO-LM/fairseq/docs/index.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/index.rst",
"repo_id": "COCO-LM",
"token_count": 333
}
| 164 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveMask(nn.Module):
"""Soft masking function for adaptive size.
It masks out the last K values of an input. The masking value
goes from 1 to 0 gradually, so K can be learned with
back-propagation.
Args:
max_size: maximum size (i.e. input dimension)
ramp_size: size of the ramp going from 0 to 1
init_val: initial size proportion not to be masked out
shape: learn multiple sizes independent of each other
"""
def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
nn.Module.__init__(self)
self._max_size = max_size
self._ramp_size = ramp_size
self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
self.register_buffer("mask_template", mask_template)
def forward(self, x):
mask = self.mask_template.float() + self.current_val.float() * self._max_size
mask = mask / self._ramp_size + 1
mask = mask.clamp(0, 1)
if x.size(-1) < self._max_size:
# the input could have been trimmed beforehand to save computation
mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1))
x = (x * mask).type_as(x)
return x
def get_current_max_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.max().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def get_current_avg_size(self, include_ramp=True):
current_size = math.ceil(
self.current_val.float().mean().item() * self._max_size
)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def clamp_param(self):
"""this need to be called after each update"""
self.current_val.data.clamp_(0, 1)
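# Illustrative sketch (not part of the original file; numbers are made up):
#
#   m = AdaptiveMask(max_size=8, ramp_size=4, init_val=0.5)
#   attn = torch.ones(1, 8)
#   out = m(attn)        # positions outside the learned span are smoothly scaled toward 0
#   m.clamp_param()      # call after each update to keep the size ratio in [0, 1]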
class AdaptiveSpan(nn.Module):
"""Adaptive attention span for Transformerself.
This module learns an attention span length from data for each
self-attention head.
Args:
attn_span: maximum attention span
adapt_span_loss: loss coefficient for the span length
adapt_span_ramp: length of the masking ramp
adapt_span_init: initial size ratio
adapt_span_cache: adapt cache size to reduce memory usage
"""
def __init__(
self,
attn_span,
adapt_span_ramp,
adapt_span_init,
n_head,
adapt_span_layer,
**kargs
):
nn.Module.__init__(self)
self._max_span = attn_span
self._n_head = n_head
self._adapt_span_layer = adapt_span_layer
if self._adapt_span_layer:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
)
else:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
shape=(n_head, 1, 1),
)
def forward(self, attn, normalize=True):
"""mask attention with the right span"""
# batch and head dimensions are merged together, so separate them first
self.clamp_param()
if self._adapt_span_layer:
attn = self._mask(attn)
else:
B = attn.size(0) # batch size
M = attn.size(1) # block size
attn = attn.reshape(B // self._n_head, self._n_head, M, -1)
attn = self._mask(attn)
attn = attn.view(B, M, -1)
return attn
def get_trim_len(self):
"""how much of memory can be trimmed to reduce computation"""
L = self._max_span
trim_len = min(L - 1, L - self._mask.get_current_max_size())
# too fine granularity might be bad for the memory management
trim_len = math.floor(trim_len / 64) * 64
return trim_len
def trim_memory(self, query, key, value, key_pe):
"""trim out unnecessary memory beforehand to reduce computation"""
trim_len = self.get_trim_len()
cache_size = key.size(1) - query.size(1)
trim_len_cache = trim_len - (self._max_span - cache_size)
if trim_len_cache > 0:
key = key[:, trim_len_cache:, :]
value = value[:, trim_len_cache:, :]
elif trim_len_cache < 0:
# cache is too short! this happens when validation resumes
# after a lot of updates.
key = F.pad(key, [0, 0, -trim_len_cache, 0])
value = F.pad(value, [0, 0, -trim_len_cache, 0])
if trim_len > 0:
if key_pe is not None:
key_pe = key_pe[:, :, trim_len:]
return key, value, key_pe
def get_cache_size(self):
"""determine how long the cache should be"""
trim_len = self.get_trim_len()
# give a buffer of 64 steps since a span might increase
# in future updates
return min(self._max_span, self._max_span - trim_len + 64)
def get_loss(self):
"""a loss term for regularizing the span length"""
return self._max_span * self._mask.current_val.float().mean()
def get_current_max_span(self):
return self._mask.get_current_max_size()
def get_current_avg_span(self):
return self._mask.get_current_avg_size()
def clamp_param(self):
self._mask.clamp_param()
|
COCO-LM/fairseq/examples/adaptive_span/adaptive_span_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/adaptive_span/adaptive_span_attention.py",
"repo_id": "COCO-LM",
"token_count": 2622
}
| 165 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as op
from collections import namedtuple
from multiprocessing import cpu_count
from typing import List, Optional
import sentencepiece as sp
from fairseq.data.encoders.byte_bpe import ByteBPE
from fairseq.data.encoders.byte_utils import byte_encode
from fairseq.data.encoders.bytes import Bytes
from fairseq.data.encoders.characters import Characters
from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
SPLITS = ["train", "valid", "test"]
def _convert_xml(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if not ss.startswith("<seg"):
continue
ss = ss.replace("</seg>", "").split('">')
assert len(ss) == 2
f_o.write(ss[1].strip() + "\n")
def _convert_train(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if ss.startswith("<"):
continue
f_o.write(ss.strip() + "\n")
def _get_bytes(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Bytes.encode(s.strip()) + "\n")
def _get_chars(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Characters.encode(s.strip()) + "\n")
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
Args = namedtuple(
"Args",
[
"moses_source_lang",
"moses_target_lang",
"moses_no_dash_splits",
"moses_no_escape",
],
)
args = Args(
moses_source_lang=src,
moses_target_lang=tgt,
moses_no_dash_splits=False,
moses_no_escape=False,
)
pretokenizer = MosesTokenizer(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(pretokenizer.encode(s.strip()) + "\n")
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
with open(out_path, "w") as f_o:
for lang in [src, tgt]:
with open(f"{in_path_prefix}.{lang}") as f:
for s in f:
f_o.write(byte_encode(s.strip()) + "\n")
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
arguments = [
f"--input={in_path}",
f"--model_prefix={model_prefix}",
f"--model_type=bpe",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
"--normalization_rule_name=identity",
f"--num_threads={cpu_count()}",
]
sp.SentencePieceTrainer.Train(" ".join(arguments))
def _apply_bbpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model_path"])
args = Args(sentencepiece_model_path=model_path)
tokenizer = ByteBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _apply_bpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model"])
args = Args(sentencepiece_model=model_path)
tokenizer = SentencepieceBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _concat_files(in_paths: List[str], out_path: str):
with open(out_path, "w") as f_o:
for p in in_paths:
with open(p) as f:
for r in f:
f_o.write(r)
def preprocess_iwslt17(
root: str,
src: str,
tgt: str,
bpe_size: Optional[int],
need_chars: bool,
bbpe_size: Optional[int],
need_bytes: bool,
):
# extract bitext
in_root = op.join(root, f"{src}-{tgt}")
for lang in [src, tgt]:
_convert_train(
op.join(in_root, f"train.tags.{src}-{tgt}.{lang}"),
op.join(root, f"train.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"),
op.join(root, f"valid.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"),
op.join(root, f"test.{lang}"),
)
# pre-tokenize
for lang in [src, tgt]:
for split in SPLITS:
pretokenize(
op.join(root, f"{split}.{lang}"),
op.join(root, f"{split}.moses.{lang}"),
src,
tgt,
)
# tokenize with BPE vocabulary
if bpe_size is not None:
# learn vocabulary
concated_train_path = op.join(root, "train.all")
_concat_files(
[op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
concated_train_path,
)
bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}")
_get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
os.remove(concated_train_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bpe(
bpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"),
)
# tokenize with bytes vocabulary
if need_bytes:
for lang in [src, tgt]:
for split in SPLITS:
_get_bytes(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bytes.{lang}"),
)
# tokenize with characters vocabulary
if need_chars:
for lang in [src, tgt]:
for split in SPLITS:
_get_chars(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.chars.{lang}"),
)
# tokenize with byte-level BPE vocabulary
if bbpe_size is not None:
# learn vocabulary
bchar_path = op.join(root, "train.bchar")
_convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path)
bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}")
_get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
os.remove(bchar_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bbpe(
bbpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"),
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=str, default="data")
parser.add_argument(
"--bpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--bbpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BBPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--byte-vocab",
action="store_true",
help="Generate tokenized bitext with bytes vocabulary",
)
parser.add_argument(
"--char-vocab",
action="store_true",
help="Generate tokenized bitext with chars vocabulary",
)
args = parser.parse_args()
preprocess_iwslt17(
args.root,
"fr",
"en",
args.bpe_vocab,
args.char_vocab,
args.bbpe_vocab,
args.byte_vocab,
)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/byte_level_bpe/get_bitext.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/byte_level_bpe/get_bitext.py",
"repo_id": "COCO-LM",
"token_count": 4114
}
| 166 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .laser_task import * # noqa
from .laser_lstm import * # noqa
from .laser_transformer import * # noqa
|
COCO-LM/fairseq/examples/laser/laser_src/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/laser/laser_src/__init__.py",
"repo_id": "COCO-LM",
"token_count": 87
}
| 167 |
# Linformer: Self-Attention with Linear Complexity (Wang et al., 2020)
This example contains code to train Linformer models as described in our paper
[Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768).
## Training a new Linformer RoBERTa model
You can mostly follow the [RoBERTa pretraining README](/examples/roberta/README.pretraining.md),
updating your training command with `--user-dir examples/linformer/linformer_src --arch linformer_roberta_base`.
## Citation
If you use our work, please cite:
```bibtex
@article{wang2020linformer,
title={Linformer: Self-Attention with Linear Complexity},
author={Wang, Sinong and Li, Belinda and Khabsa, Madian and Fang, Han and Ma, Hao},
journal={arXiv preprint arXiv:2006.04768},
year={2020}
}
```
|
COCO-LM/fairseq/examples/linformer/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/linformer/README.md",
"repo_id": "COCO-LM",
"token_count": 249
}
| 168 |
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
SCRIPT=`realpath $0`
MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2
export PATH=$PATH:"$MECAB/bin":"$MECAB/lib"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib"
cat - | mecab -O wakati
|
COCO-LM/fairseq/examples/m2m_100/tokenizers/seg_ko.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/tokenizers/seg_ko.sh",
"repo_id": "COCO-LM",
"token_count": 153
}
| 169 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
from utils.dedup import deup
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--from-folder", type=str, required=True,
help="the data folder to be dedup")
parser.add_argument("--to-folder", type=str, required=True,
help="the data folder to save deduped data")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
if args.directions is None:
raw_files = glob.glob(f'{args.from_folder}/train*')
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
else:
directions = args.directions.split(',')
directions = sorted(set(directions))
for direction in directions:
src, tgt = direction.split('-')
src_file = f'{args.from_folder}/train.{src}-{tgt}.{src}'
tgt_file = f'{args.from_folder}/train.{src}-{tgt}.{tgt}'
src_file_out = f'{args.to_folder}/train.{src}-{tgt}.{src}'
tgt_file_out = f'{args.to_folder}/train.{src}-{tgt}.{tgt}'
assert src_file != src_file_out
assert tgt_file != tgt_file_out
print(f'deduping {src_file}, {tgt_file}')
deup(src_file, tgt_file, src_file_out, tgt_file_out)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/multilingual/data_scripts/dedup_all.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/dedup_all.py",
"repo_id": "COCO-LM",
"token_count": 762
}
| 170 |
grep "seg id" | sed 's/<seg id="[0-9]\+">//g' | sed 's/<\/seg>//g'
|
COCO-LM/fairseq/examples/multilingual/data_scripts/utils/strip_sgm.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/utils/strip_sgm.sh",
"repo_id": "COCO-LM",
"token_count": 42
}
| 171 |
#!/usr/bin/env python3 -u
import argparse
import fileinput
import logging
import os
import sys
from fairseq.models.transformer import TransformerModel
logging.getLogger().setLevel(logging.INFO)
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("--en2fr", required=True, help="path to en2fr model")
parser.add_argument(
"--fr2en", required=True, help="path to fr2en mixture of experts model"
)
parser.add_argument(
"--user-dir", help="path to fairseq examples/translation_moe/src directory"
)
parser.add_argument(
"--num-experts",
type=int,
default=10,
help="(keep at 10 unless using a different model)",
)
parser.add_argument(
"files",
nargs="*",
default=["-"],
help='input files to paraphrase; "-" for stdin',
)
args = parser.parse_args()
if args.user_dir is None:
args.user_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # examples/
"translation_moe",
"src",
)
if os.path.exists(args.user_dir):
logging.info("found user_dir:" + args.user_dir)
else:
raise RuntimeError(
"cannot find fairseq examples/translation_moe/src "
"(tried looking here: {})".format(args.user_dir)
)
logging.info("loading en2fr model from:" + args.en2fr)
en2fr = TransformerModel.from_pretrained(
model_name_or_path=args.en2fr,
tokenizer="moses",
bpe="sentencepiece",
).eval()
logging.info("loading fr2en model from:" + args.fr2en)
fr2en = TransformerModel.from_pretrained(
model_name_or_path=args.fr2en,
tokenizer="moses",
bpe="sentencepiece",
user_dir=args.user_dir,
task="translation_moe",
).eval()
def gen_paraphrases(en):
fr = en2fr.translate(en)
return [
fr2en.translate(fr, inference_step_args={"expert": i})
for i in range(args.num_experts)
]
logging.info("Type the input sentence and press return:")
for line in fileinput.input(args.files):
line = line.strip()
if len(line) == 0:
continue
for paraphrase in gen_paraphrases(line):
print(paraphrase)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/paraphraser/paraphrase.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/paraphraser/paraphrase.py",
"repo_id": "COCO-LM",
"token_count": 1100
}
| 172 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction_r3f")
class SentencePredictionR3F(FairseqCriterion):
def __init__(
self,
task,
eps,
r3f_lambda,
noise_type,
classification_head_name,
regression_target,
):
super().__init__(task)
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
self.classification_head_name = classification_head_name
self.regression_target = regression_target
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
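    # The value returned above is the symmetric KL term used by R3F, i.e.
    #   (KL(p_clean || p_noised) + KL(p_noised || p_clean)) / batch_size,
    # with both softmax distributions computed in fp32 for numerical stability.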
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
token_embeddings = model.encoder.sentence_encoder.embed_tokens(
sample["net_input"]["src_tokens"]
)
input_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=token_embeddings,
)
if model.training and self.noise_sampler:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.detach().clone() + noise
noised_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=noised_embeddings,
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
else:
symm_kl = 0
targets = model.get_targets(sample, [input_logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
else:
logits = input_logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = input_logits.max(dim=1)[1]
logging_output.update(ncorrect=(preds == targets).sum().item())
if model.training and self.noise_sampler:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"symm_kl": symm_kl_sum / sample_size,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
agg_output.update(accuracy=ncorrect / nsentences)
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
|
COCO-LM/fairseq/examples/rxf/rxf_src/sentence_prediction_r3f.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/rxf/rxf_src/sentence_prediction_r3f.py",
"repo_id": "COCO-LM",
"token_count": 3192
}
| 173 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import sys
from scorers import build_scorer
from tornado import ioloop, web
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 12321
class ScorerHandler(web.RequestHandler):
def initialize(self, scorer):
self.scorer = scorer
class EvalSessionHandler(ScorerHandler):
def post(self):
self.scorer.reset()
def get(self):
r = json.dumps(self.scorer.get_info())
self.write(r)
class ResultHandler(ScorerHandler):
def get(self):
r = json.dumps(self.scorer.score())
self.write(r)
class SourceHandler(ScorerHandler):
def get(self):
sent_id = int(self.get_argument("sent_id"))
segment_size = None
if "segment_size" in self.request.arguments:
string = self.get_argument("segment_size")
if len(string) > 0:
segment_size = int(string)
r = json.dumps(self.scorer.send_src(int(sent_id), segment_size))
self.write(r)
class HypothesisHandler(ScorerHandler):
def put(self):
sent_id = int(self.get_argument("sent_id"))
list_of_tokens = self.request.body.decode("utf-8").strip().split()
self.scorer.recv_hyp(sent_id, list_of_tokens)
def add_args():
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
help='Server hostname')
    parser.add_argument('--port', type=int, default=DEFAULT_PORT,
                        help='Server port number')
    parser.add_argument('--debug', action='store_true',
                        help='Run the evaluation server in Tornado debug mode')
args, _ = parser.parse_known_args()
# fmt: on
return args
def start_server(scorer, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, debug=False):
app = web.Application(
[
(r"/result", ResultHandler, dict(scorer=scorer)),
(r"/src", SourceHandler, dict(scorer=scorer)),
(r"/hypo", HypothesisHandler, dict(scorer=scorer)),
(r"/", EvalSessionHandler, dict(scorer=scorer)),
],
debug=debug,
)
app.listen(port, max_buffer_size=1024 ** 3)
sys.stdout.write(f"Evaluation Server Started. Listening to port {port}\n")
ioloop.IOLoop.current().start()
if __name__ == "__main__":
args = add_args()
scorer = build_scorer(args)
start_server(scorer, args.hostname, args.port, args.debug)
|
COCO-LM/fairseq/examples/simultaneous_translation/eval/server.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/server.py",
"repo_id": "COCO-LM",
"token_count": 1059
}
| 174 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
            # loss: per-sentence loss (sentence_avg case)
            # nll_loss: per output token loss
return agg_output
|
COCO-LM/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py",
"repo_id": "COCO-LM",
"token_count": 2351
}
| 175 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
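# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): illustrates the string
# format expected by --conv-enc-config, i.e. a Python literal with one
# (out_channels, kernel_size, padding, dropout) tuple per conv layer. The
# concrete layer sizes below are made up for illustration; the real default
# lives in default_conv_enc_config defined earlier in this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_conv_enc_config = "[(128, 3, 1, 0.1), (256, 3, 1, 0.1)]"
    # build_model() eval()s the string into a list of layer tuples
    parsed = eval(example_conv_enc_config)
    assert all(len(layer) == 4 for layer in parsed)
    assert all(layer[0] % 2 == 0 for layer in parsed)  # GLU needs even channels
    print(parsed)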
|
COCO-LM/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py",
"repo_id": "COCO-LM",
"token_count": 2915
}
| 176 |
import math
import os
import json
import numpy as np
import torch
import torchaudio.compliance.kaldi as kaldi
import yaml
from fairseq import checkpoint_utils, tasks
from fairseq.file_io import PathManager
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import SpeechAgent
from simuleval.states import ListEntry, SpeechStates
except ImportError:
print("Please install simuleval 'pip install simuleval'")
SHIFT_SIZE = 10
WINDOW_SIZE = 25
SAMPLE_RATE = 16000
FEATURE_DIM = 80
BOW_PREFIX = "\u2581"
class OnlineFeatureExtractor:
"""
    Extract speech features on the fly.
"""
def __init__(self, args):
self.shift_size = args.shift_size
self.window_size = args.window_size
assert self.window_size >= self.shift_size
self.sample_rate = args.sample_rate
self.feature_dim = args.feature_dim
self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000)
self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000)
self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000
self.previous_residual_samples = []
self.global_cmvn = args.global_cmvn
def clear_cache(self):
self.previous_residual_samples = []
def __call__(self, new_samples):
samples = self.previous_residual_samples + new_samples
if len(samples) < self.num_samples_per_window:
self.previous_residual_samples = samples
return
# num_frames is the number of frames from the new segment
num_frames = math.floor(
(len(samples) - self.len_ms_to_samples(self.window_size - self.shift_size))
/ self.num_samples_per_shift
)
# the number of frames used for feature extraction
        # including some part of the previous segment
effective_num_samples = int(
num_frames * self.len_ms_to_samples(self.shift_size)
+ self.len_ms_to_samples(self.window_size - self.shift_size)
)
input_samples = samples[:effective_num_samples]
self.previous_residual_samples = samples[
num_frames * self.num_samples_per_shift:
]
torch.manual_seed(1)
output = kaldi.fbank(
torch.FloatTensor(input_samples).unsqueeze(0),
num_mel_bins=self.feature_dim,
frame_length=self.window_size,
frame_shift=self.shift_size,
).numpy()
output = self.transform(output)
return torch.from_numpy(output)
def transform(self, input):
if self.global_cmvn is None:
return input
mean = self.global_cmvn["mean"]
std = self.global_cmvn["std"]
x = np.subtract(input, mean)
x = np.divide(x, std)
return x
class TensorListEntry(ListEntry):
"""
    Data structure to store a list of tensors.
"""
def append(self, value):
if len(self.value) == 0:
self.value = value
return
self.value = torch.cat([self.value] + [value], dim=0)
def info(self):
return {
"type": str(self.new_value_type),
"length": self.__len__(),
"value": "" if type(self.value) is list else self.value.size(),
}
class FairseqSimulSTAgent(SpeechAgent):
speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size
def __init__(self, args):
super().__init__(args)
self.eos = DEFAULT_EOS
self.gpu = getattr(args, "gpu", False)
self.args = args
self.load_model_vocab(args)
if getattr(
self.model.decoder.layers[0].encoder_attn,
'pre_decision_ratio',
None
) is not None:
self.speech_segment_size *= (
self.model.decoder.layers[0].encoder_attn.pre_decision_ratio
)
args.global_cmvn = None
if args.config:
with open(os.path.join(args.data_bin, args.config), "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
if "global_cmvn" in config:
args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"])
if args.global_stats:
with PathManager.open(args.global_stats, "r") as f:
global_cmvn = json.loads(f.read())
self.global_cmvn = {"mean": global_cmvn["mean"], "std": global_cmvn["stddev"]}
self.feature_extractor = OnlineFeatureExtractor(args)
self.max_len = args.max_len
self.force_finish = args.force_finish
torch.set_grad_enabled(False)
def build_states(self, args, client, sentence_id):
# Initialize states here, for example add customized entry to states
# This function will be called at beginning of every new sentence
states = SpeechStates(args, client, sentence_id, self)
self.initialize_states(states)
return states
def to_device(self, tensor):
if self.gpu:
return tensor.cuda()
else:
return tensor.cpu()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--model-path', type=str, required=True,
help='path to your pretrained model.')
parser.add_argument("--data-bin", type=str, required=True,
help="Path of data binary")
parser.add_argument("--config", type=str, default=None,
help="Path to config yaml file")
parser.add_argument("--global-stats", type=str, default=None,
help="Path to json file containing cmvn stats")
parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for target text")
parser.add_argument("--tgt-splitter-path", type=str, default=None,
help="Subword splitter model path for target text")
parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation",
help="User directory for simultaneous translation")
parser.add_argument("--max-len", type=int, default=200,
help="Max length of translation")
parser.add_argument("--force-finish", default=False, action="store_true",
help="Force the model to finish the hypothsis if the source is not finished")
parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE,
help="Shift size of feature extraction window.")
parser.add_argument("--window-size", type=int, default=WINDOW_SIZE,
help="Window size of feature extraction window.")
parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE,
help="Sample rate")
parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM,
help="Acoustic feature dimension.")
# fmt: on
return parser
def load_model_vocab(self, args):
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
task_args = state["cfg"]["task"]
task_args.data = args.data_bin
if args.config is not None:
task_args.config_yaml = args.config
task = tasks.setup_task(task_args)
# build model for ensemble
state["cfg"]["model"].load_pretrained_encoder_from = None
state["cfg"]["model"].load_pretrained_decoder_from = None
self.model = task.build_model(state["cfg"]["model"])
self.model.load_state_dict(state["model"], strict=True)
self.model.eval()
self.model.share_memory()
if self.gpu:
self.model.cuda()
# Set dictionary
self.dict = {}
self.dict["tgt"] = task.target_dictionary
def initialize_states(self, states):
self.feature_extractor.clear_cache()
states.units.source = TensorListEntry()
states.units.target = ListEntry()
states.incremental_states = dict()
def segment_to_units(self, segment, states):
# Convert speech samples to features
features = self.feature_extractor(segment)
if features is not None:
return [features]
else:
return []
def units_to_segment(self, units, states):
        # Merge subwords into a full word.
if self.model.decoder.dictionary.eos() == units[0]:
return DEFAULT_EOS
segment = []
if None in units.value:
units.value.remove(None)
for index in units:
if index is None:
units.pop()
token = self.model.decoder.dictionary.string([index])
if token.startswith(BOW_PREFIX):
if len(segment) == 0:
segment += [token.replace(BOW_PREFIX, "")]
else:
for j in range(len(segment)):
units.pop()
string_to_return = ["".join(segment)]
if self.model.decoder.dictionary.eos() == units[0]:
string_to_return += [DEFAULT_EOS]
return string_to_return
else:
segment += [token.replace(BOW_PREFIX, "")]
if (
len(units) > 0
and self.model.decoder.dictionary.eos() == units[-1]
or len(states.units.target) > self.max_len
):
tokens = [self.model.decoder.dictionary.string([unit]) for unit in units]
return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS]
return None
def update_model_encoder(self, states):
if len(states.units.source) == 0:
return
src_indices = self.to_device(
states.units.source.value.unsqueeze(0)
)
src_lengths = self.to_device(
torch.LongTensor([states.units.source.value.size(0)])
)
states.encoder_states = self.model.encoder(src_indices, src_lengths)
torch.cuda.empty_cache()
def update_states_read(self, states):
# Happens after a read action.
self.update_model_encoder(states)
def policy(self, states):
if not getattr(states, "encoder_states", None):
return READ_ACTION
tgt_indices = self.to_device(
torch.LongTensor(
[self.model.decoder.dictionary.eos()]
+ [x for x in states.units.target.value if x is not None]
).unsqueeze(0)
)
states.incremental_states["steps"] = {
"src": states.encoder_states["encoder_out"][0].size(0),
"tgt": 1 + len(states.units.target),
}
states.incremental_states["online"] = not states.finish_read()
x, outputs = self.model.decoder.forward(
prev_output_tokens=tgt_indices,
encoder_out=states.encoder_states,
incremental_state=states.incremental_states,
)
states.decoder_out = x
states.decoder_out_extra = outputs
torch.cuda.empty_cache()
if outputs["action"] == 0:
return READ_ACTION
else:
return WRITE_ACTION
def predict(self, states):
decoder_states = states.decoder_out
lprobs = self.model.get_normalized_probs(
[decoder_states[:, -1:]], log_probs=True
)
index = lprobs.argmax(dim=-1)
index = index[0, 0].item()
if (
self.force_finish
and index == self.model.decoder.dictionary.eos()
and not states.finish_read()
):
            # If we want to force-finish the translation
            # (i.e. don't stop before finishing reading), return None
# self.model.decoder.clear_cache(states.incremental_states)
index = None
return index
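# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): push one 100 ms chunk
# of random audio through the streaming OnlineFeatureExtractor. The Namespace
# fields mirror the module-level defaults above; the chunk length and content
# are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    example_args = Namespace(
        shift_size=SHIFT_SIZE,
        window_size=WINDOW_SIZE,
        sample_rate=SAMPLE_RATE,
        feature_dim=FEATURE_DIM,
        global_cmvn=None,
    )
    extractor = OnlineFeatureExtractor(example_args)
    chunk = torch.randn(int(0.1 * SAMPLE_RATE)).tolist()  # 100 ms of samples
    feats = extractor(chunk)
    # expect roughly (num_frames, FEATURE_DIM), or None if the chunk is too short
    print(None if feats is None else feats.shape)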
|
COCO-LM/fairseq/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py",
"repo_id": "COCO-LM",
"token_count": 5691
}
| 177 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
# Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
# which adds ``None`` values to an ``nn.ParameterList``, which is not
# supported in PyTorch. Instead we can replace this with an
# ``nn.ModuleList``, which does support ``None`` values.
try:
if all(p is None for p in self.model.crit.out_projs._parameters.values()):
self.model.crit.out_projs = torch.nn.ModuleList(
[None] * len(self.model.crit.out_projs._parameters)
)
except Exception:
pass
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
|
COCO-LM/fairseq/examples/truncated_bptt/transformer_xl_model.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/truncated_bptt/transformer_xl_model.py",
"repo_id": "COCO-LM",
"token_count": 2391
}
| 178 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data import Dictionary
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model("dummy_model")
class DummyModel(FairseqLanguageModel):
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
@staticmethod
def add_args(parser):
parser.add_argument("--num-layers", type=int, default=24)
parser.add_argument("--embed-dim", type=int, default=1024)
@classmethod
def build_model(cls, args, task):
encoder = DummyEncoder(
num_embed=len(task.target_dictionary),
embed_dim=args.embed_dim,
num_layers=args.num_layers,
)
return cls(args, encoder)
def forward(self, src_tokens, masked_tokens=None, **kwargs):
return self.decoder(src_tokens, masked_tokens=masked_tokens)
class DummyEncoder(FairseqDecoder):
def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):
super().__init__(Dictionary())
self.embed = nn.Embedding(
num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0
)
self.layers_a = nn.ModuleList(
[
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection
nn.Linear(3 * embed_dim, embed_dim), # skip self-attention
nn.Linear(embed_dim, embed_dim), # output projection
nn.Dropout(),
)
for i in range(num_layers)
]
)
self.layers_b = nn.ModuleList(
[
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 4 * embed_dim), # FFN
nn.ReLU(),
nn.Linear(4 * embed_dim, embed_dim), # FFN
nn.Dropout(0.1),
)
for i in range(num_layers)
]
)
self.out_proj = nn.Linear(embed_dim, num_embed)
def forward(self, tokens, masked_tokens=None):
x = self.embed(tokens)
for layer_a, layer_b in zip(self.layers_a, self.layers_b):
x = x + layer_a(x)
x = x + layer_b(x)
x = self.out_proj(x)
if masked_tokens is not None:
x = x[masked_tokens]
return (x,)
def max_positions(self):
return 1024
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
@register_model_architecture("dummy_model", "dummy_model")
def base_architecture(args):
pass
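# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original file): run a tiny DummyEncoder
# forward pass on random token ids. The vocabulary size, embedding dim and
# sequence shape below are arbitrary illustration values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    enc = DummyEncoder(num_embed=100, embed_dim=16, num_layers=2)
    tokens = torch.randint(1, 100, (2, 8))  # (batch, seq_len)
    logits = enc(tokens)[0]
    print(logits.shape)  # expected: torch.Size([2, 8, 100])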
|
COCO-LM/fairseq/fairseq/benchmark/dummy_model.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/benchmark/dummy_model.py",
"repo_id": "COCO-LM",
"token_count": 1569
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
sentence_avg: bool = II("optimization.sentence_avg")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
@register_criterion(
"label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig
)
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size=0,
report_accuracy=False,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
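# ---------------------------------------------------------------------------
# Hedged numerical sketch (not part of the original criterion): call the
# standalone label_smoothed_nll_loss helper on a toy batch. The shapes,
# smoothing value and padding index are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)  # (tokens, vocab)
    target = torch.tensor([1, 2, 3, 0])                     # (tokens,)
    loss, nll_loss = label_smoothed_nll_loss(
        lprobs, target, epsilon=0.1, ignore_index=0
    )
    print(loss.item(), nll_loss.item())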
|
COCO-LM/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py",
"repo_id": "COCO-LM",
"token_count": 2932
}
| 180 |
import importlib
import os
from abc import ABC, abstractmethod
from typing import Dict, Optional
class AudioFeatureTransform(ABC):
@classmethod
@abstractmethod
def from_config_dict(cls, config: Optional[Dict] = None):
pass
AUDIO_FEATURE_TRANSFORM_REGISTRY = {}
AUDIO_FEATURE_TRANSFORM_CLASS_NAMES = set()
def register_audio_feature_transform(name):
def register_audio_feature_transform_cls(cls):
if name in AUDIO_FEATURE_TRANSFORM_REGISTRY:
raise ValueError(f"Cannot register duplicate transform ({name})")
if not issubclass(cls, AudioFeatureTransform):
raise ValueError(
f"Transform ({name}: {cls.__name__}) must extend "
"AudioFeatureTransform"
)
if cls.__name__ in AUDIO_FEATURE_TRANSFORM_CLASS_NAMES:
raise ValueError(
f"Cannot register audio feature transform with duplicate "
f"class name ({cls.__name__})"
)
AUDIO_FEATURE_TRANSFORM_REGISTRY[name] = cls
AUDIO_FEATURE_TRANSFORM_CLASS_NAMES.add(cls.__name__)
return cls
return register_audio_feature_transform_cls
def get_audio_feature_transform(name):
return AUDIO_FEATURE_TRANSFORM_REGISTRY[name]
transforms_dir = os.path.dirname(__file__)
for file in os.listdir(transforms_dir):
path = os.path.join(transforms_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module("fairseq.data.audio.feature_transforms." + name)
class CompositeAudioFeatureTransform(AudioFeatureTransform):
@classmethod
def from_config_dict(cls, config=None):
_config = {} if config is None else config
_transforms = _config.get("transforms")
if _transforms is None:
return None
transforms = [
get_audio_feature_transform(_t).from_config_dict(_config.get(_t))
for _t in _transforms
]
return CompositeAudioFeatureTransform(transforms)
def __init__(self, transforms):
self.transforms = [t for t in transforms if t is not None]
def __call__(self, x):
for t in self.transforms:
x = t(x)
return x
def __repr__(self):
format_string = (
[self.__class__.__name__ + "("]
+ [f" {t.__repr__()}" for t in self.transforms]
+ [")"]
)
return "\n".join(format_string)
|
COCO-LM/fairseq/fairseq/data/audio/feature_transforms/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/audio/feature_transforms/__init__.py",
"repo_id": "COCO-LM",
"token_count": 1146
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from fairseq import utils
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
class Dictionary:
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.alias_mapper = {'<s>': '[CLS]', '<pad>': '[PAD]', '</s>':'[SEP]', '<unk>': '[UNK]', '<mask>': '[MASK]',
'[CLS]': '[CLS]', '[PAD]': '[PAD]', '[SEP]':'[SEP]', '[UNK]': '[UNK]', '[MASK]': '[MASK]'}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.alias_mapper:
sym = self.alias_mapper[sym]
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
include_eos=False,
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(t, bpe_symbol, escape_unk, extra_symbols_to_ignore, include_eos=include_eos)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = " ".join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.alias_mapper:
word = self.alias_mapper[word]
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if word in self.alias_mapper:
                word = self.alias_mapper[word]
            if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
) -> torch.IntTensor:
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(
filename, tokenize, eos_word, worker_id=0, num_workers=1
):
counter = Counter()
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
                # f.tell() returns only an opaque number which can be used
                # to return to the position in the file via f.seek()
# and does not necessarily represent a byte position
# in the file. However, f.tell() is faithful to the
# byte position _most of the time_. Thus we can just
# check against the file size to prevent early exit.
if f.tell() > end and f.tell() < size:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
filename, tokenize, dict.eos_word
)
)
class TruncatedDictionary(object):
def __init__(self, wrapped_dict, length):
self.__class__ = type(
wrapped_dict.__class__.__name__,
(self.__class__, wrapped_dict.__class__),
{},
)
self.__dict__ = wrapped_dict.__dict__
self.wrapped_dict = wrapped_dict
self.length = min(len(self.wrapped_dict), length)
def __len__(self):
return self.length
def __getitem__(self, i):
if i < self.length:
return self.wrapped_dict[i]
return self.wrapped_dict.unk()
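# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): build a tiny
# Dictionary from a single sentence and round-trip it through encode_line()
# and string(). The sentence is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    d = Dictionary()
    ids = d.encode_line("hello world hello", add_if_not_exist=True)
    print(ids.tolist())   # ids for "hello world hello </s>"
    print(d.string(ids))  # "hello world hello" (special symbols stripped)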
|
COCO-LM/fairseq/fairseq/data/dictionary.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/dictionary.py",
"repo_id": "COCO-LM",
"token_count": 6848
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import encoders
def get_whole_word_mask(args, dictionary):
bpe = encoders.build_bpe(args)
if bpe is not None:
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith("madeupword"):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(dictionary))))
)
return mask_whole_words
return None
|
COCO-LM/fairseq/fairseq/data/encoders/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/utils.py",
"repo_id": "COCO-LM",
"token_count": 409
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from collections import OrderedDict
from typing import Dict, List
import numpy as np
from fairseq.data import data_utils
from . import FairseqDataset
logger = logging.getLogger(__name__)
class MultiCorpusDataset(FairseqDataset):
"""
    Stores multiple instances of FairseqDataset together. Requires each instance
    to be of the same dataset type, as the collate method needs to work on batches
    with samples from each dataset.
Allows specifying a distribution over the datasets to use. Note that unlike
MultiCorpusSampledDataset, this distribution allows sampling for each item,
rather than on a batch level.
Each time ordered_indices() is called, a new sample is generated with
the specified distribution.
Args:
        datasets: an OrderedDict of FairseqDataset instances.
distribution: a List containing the probability of getting an utterance from
corresponding dataset
        seed: random seed for sampling the datasets
sort_indices: if true, will sort the ordered indices by size
batch_sample: if true, will ensure each batch is from a single dataset
"""
def __init__(
self,
datasets: Dict[str, FairseqDataset],
distribution: List[float],
seed: int,
sort_indices: bool = False,
batch_sample: bool = False,
):
super().__init__()
assert isinstance(datasets, OrderedDict)
assert len(datasets) == len(distribution)
assert sum(distribution) == 1
self.datasets = datasets
self.distribution = distribution
self.seed = seed
self.sort_indices = sort_indices
self.batch_sample = batch_sample
# Avoid repeated conversions to list later
self.dataset_list = list(datasets.values())
self.total_num_instances = 0
first_dataset = list(self.datasets.values())[0]
self.dataset_offsets = []
for dataset in datasets.values():
assert isinstance(dataset, FairseqDataset)
assert type(dataset) is type(first_dataset)
self.dataset_offsets.append(self.total_num_instances)
self.total_num_instances += len(dataset)
def ordered_indices(self):
start = time.time()
with data_utils.numpy_seed(self.seed, self.epoch):
sampled_indices = []
num_selected_instances = 0
# For each dataset i, sample self.distribution[i] * self.total_num_instances
for i, key in enumerate(self.datasets):
if i < len(self.datasets) - 1:
num_instances = int(self.distribution[i] * self.total_num_instances)
high = self.dataset_offsets[i + 1]
else:
num_instances = self.total_num_instances - num_selected_instances
high = self.total_num_instances
logger.info(f"sampling {num_instances} from {key} dataset")
num_selected_instances += num_instances
# First, add k copies of the dataset where k = num_instances // len(dataset).
                # This ensures as even a distribution of the data points as possible.
                # The remaining entries are sampled randomly.
dataset_size = len(self.datasets[key])
num_copies = num_instances // dataset_size
dataset_indices = (
np.random.permutation(high - self.dataset_offsets[i])
+ self.dataset_offsets[i]
)[: num_instances - num_copies * dataset_size]
if num_copies > 0:
sampled_indices += list(
np.concatenate(
(
np.repeat(
np.arange(self.dataset_offsets[i], high), num_copies
),
dataset_indices,
)
)
)
else:
sampled_indices += list(dataset_indices)
assert (
len(sampled_indices) == self.total_num_instances
), f"{len(sampled_indices)} vs {self.total_num_instances}"
np.random.shuffle(sampled_indices)
if self.sort_indices:
sampled_indices.sort(key=lambda i: self.num_tokens(i))
logger.info(
"multi_corpus_dataset ordered_indices took {}s".format(
time.time() - start
)
)
return np.array(sampled_indices, dtype=np.int64)
def _map_index(self, index: int):
"""
If dataset A has length N and dataset B has length M
then index 1 maps to index 1 of dataset A, and index N + 1
maps to index 1 of B.
"""
counter = 0
for key, dataset in self.datasets.items():
if index < counter + len(dataset):
return index - counter, key
counter += len(dataset)
raise ValueError(
"Invalid index: {}, max: {}".format(index, self.total_num_instances)
)
def __len__(self):
"""
Length of this dataset is the sum of individual datasets
"""
return self.total_num_instances
def __getitem__(self, index):
new_index, key = self._map_index(index)
try:
item = self.datasets[key][new_index]
item["full_id"] = index
return item
except Exception as e:
e.args = (f"Error from {key} dataset", *e.args)
raise
def collater(self, samples):
"""
If we are doing batch sampling, then pick the right collater to use.
Otherwise we assume all collaters are the same.
"""
if len(samples) == 0:
return None
_, key = self._map_index(samples[0]["full_id"])
return self.datasets[key].collater(samples)
def num_tokens(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].num_tokens(index)
def size(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].size(index)
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@property
def supports_prefetch(self):
return False
@property
def supports_fetch_outside_dataloader(self):
return all(
self.datasets[key].supports_fetch_outside_dataloader
for key in self.datasets
)
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
if not self.batch_sample:
return super().batch_by_size(
indices, max_tokens, max_sentences, required_batch_size_multiple
)
dataset_indices = {key: [] for key in self.datasets}
for i in indices:
_, key = self._map_index(i)
dataset_indices[key].append(i)
batches = []
for key in dataset_indices:
cur_batches = super().batch_by_size(
np.array(dataset_indices[key], dtype=np.int64),
max_tokens,
max_sentences,
required_batch_size_multiple,
)
logger.info(f"Created {len(cur_batches)} batches for dataset {key}")
batches += cur_batches
# Assume shuffling is handled in fairseq/data/iterators.py
return batches
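# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): mirrors the offset
# arithmetic used by _map_index() without constructing real FairseqDataset
# instances. Dataset names and lengths are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    lengths = OrderedDict([("corpus_a", 3), ("corpus_b", 5)])

    def map_index(index):
        counter = 0
        for key, length in lengths.items():
            if index < counter + length:
                return index - counter, key
            counter += length
        raise ValueError(f"Invalid index: {index}")

    print(map_index(1))  # (1, 'corpus_a')
    print(map_index(4))  # (1, 'corpus_b')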
|
COCO-LM/fairseq/fairseq/data/multi_corpus_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multi_corpus_dataset.py",
"repo_id": "COCO-LM",
"token_count": 3758
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None):
super().__init__(dataset)
self.token = token
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
item = torch.cat([item.new([self.token]), item])
return item
@property
def sizes(self):
if self.token is not None:
return np.array(self.dataset.sizes) + 1
else:
return self.dataset.sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
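# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wrap a minimal
# in-memory dataset so every item gets a bos-style token (id 0) prepended.
# The _TinyDataset stub below is made up purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _TinyDataset:
        def __init__(self, items):
            self.items = items
            self.sizes = np.array([len(t) for t in items])

        def __len__(self):
            return len(self.items)

        def __getitem__(self, idx):
            return self.items[idx]

        def num_tokens(self, idx):
            return len(self.items[idx])

        def size(self, idx):
            return len(self.items[idx])

    base = _TinyDataset([torch.tensor([5, 6]), torch.tensor([7])])
    wrapped = PrependTokenDataset(base, token=0)
    print(wrapped[0])     # tensor([0, 5, 6])
    print(wrapped.sizes)  # [3 2]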
|
COCO-LM/fairseq/fairseq/data/prepend_token_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/prepend_token_dataset.py",
"repo_id": "COCO-LM",
"token_count": 460
}
| 185 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:
from fvcore.common.file_io import PathManager as FVCorePathManager
try:
# [FB only - for now] AWS PathHandler for PathManager
from .fb_pathhandlers import S3PathHandler
FVCorePathManager.register_handler(S3PathHandler())
except KeyError:
logging.warning("S3PathHandler already registered.")
except ImportError:
logging.debug(
"S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
)
except ImportError:
FVCorePathManager = None
IOPathPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
fvcore's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if FVCorePathManager:
return FVCorePathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if FVCorePathManager:
return FVCorePathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if FVCorePathManager:
return FVCorePathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if FVCorePathManager:
return FVCorePathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if FVCorePathManager:
return FVCorePathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if FVCorePathManager:
return FVCorePathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if FVCorePathManager:
for p in FVCorePathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathPathManager
if not IOPathPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathPathManager
if IOPathPathManager:
return IOPathPathManager.async_close()
return False
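# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the wrapper falls
# back to Python builtins when fvcore is unavailable. The temporary file path
# below is created only for this demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    demo_path = os.path.join(tempfile.mkdtemp(), "demo.txt")
    with PathManager.open(demo_path, "w") as f:
        f.write("hello\n")
    print(PathManager.exists(demo_path))
    print(PathManager.ls(os.path.dirname(demo_path)))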
|
COCO-LM/fairseq/fairseq/file_io.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/file_io.py",
"repo_id": "COCO-LM",
"token_count": 2581
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import (
Embedding,
TransformerDecoderEmbedding,
TransformerDecoderLayer,
TransformerDecoderOutputLayer,
TransformerEncoderEmbedding,
TransformerEncoderLayer,
TransformerEncoderLayerNorm,
)
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import (
base_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de_big,
)
from fairseq.modules import SinusoidalPositionalEmbedding
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
TORCH_PIPE = False
RPC_INIT = False
def import_pipe():
global TORCH_PIPE
global RPC_INIT
try:
from torch.distributed.pipeline.sync import Pipe # noqa
global Pipe
from torch.distributed.pipeline.sync.utils import partition_model
global partition_model
from torch.distributed import rpc
import tempfile
TORCH_PIPE = True
# Initialize single process RPC agent since TORCH_PIPE requires
# RRef. RRef depends on RPC being initialized and as a result we initialize
# RPC with a single node.
tmpfile = tempfile.NamedTemporaryFile()
if not RPC_INIT:
rpc.init_rpc(
name="worker",
rank=0,
world_size=1,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method="file://{}".format(tmpfile.name),
)
)
RPC_INIT = True
logger.info('Using torch pipe')
except ImportError:
try:
from fairscale.nn import Pipe # noqa
logger.info('Using fairscale pipe')
except ImportError:
raise ImportError("Please install fairscale with: pip install fairscale")
@register_model("pipeline_parallel_transformer")
class PipelineParallelTransformerModel(BaseFairseqModel):
def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint):
import_pipe()
super().__init__()
assert isinstance(encoder, FairseqEncoder)
assert isinstance(decoder, FairseqDecoder)
encoder_module_list = (
[encoder.embedding_layer]
+ list(encoder.encoder_layers)
+ [encoder.final_layer_norm]
)
self.num_encoder_modules = len(encoder_module_list)
decoder_module_list = (
[decoder.embedding_layer]
+ list(decoder.decoder_layers)
+ [decoder.decoder_output_layer]
)
self.num_decoder_modules = len(decoder_module_list)
module_list = encoder_module_list + decoder_module_list
self.devices = devices
if TORCH_PIPE:
self.model = Pipe(
partition_model(nn.Sequential(*module_list), balance, devices),
chunks=chunks,
checkpoint=checkpoint,
)
else:
self.model = Pipe(
nn.Sequential(*module_list),
balance=balance,
devices=devices,
chunks=chunks,
checkpoint=checkpoint,
)
self.encoder_max_positions = self.max_positions_helper(
encoder.embedding_layer, "max_source_positions"
)
self.decoder_max_positions = self.max_positions_helper(
decoder.embedding_layer, "max_target_positions"
)
self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None)
# Note: To be populated during inference
self.encoder = None
self.decoder = None
def forward(self, src_tokens, src_lengths, prev_output_tokens):
if self.training:
input_lst = [src_tokens, src_lengths, prev_output_tokens]
input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst)
if TORCH_PIPE:
return self.model(input).local_value()
else:
return self.model(input)
else:
assert self.encoder is not None and self.decoder is not None, (
"encoder and decoder need to be initialized by "
+ "calling the `prepare_for_inference_()` method"
)
            # feed the raw source through the re-assembled encoder
            encoder_output_tuple = self.encoder(src_tokens, src_lengths)
return self.decoder(encoder_output_tuple)
def prepare_for_inference_(self, cfg):
if self.encoder is not None and self.decoder is not None:
logger.info("Encoder and Decoder already initialized")
return
encoder_module_list = []
decoder_module_list = []
module_count = 0
for partition in self.model.partitions:
for module in partition:
if module_count < self.num_encoder_modules:
encoder_module_list.append(module)
else:
decoder_module_list.append(module)
module_count += 1
self.model = None
self.encoder = TransformerEncoder(cfg.distributed_training, None, None, encoder_module_list)
self.decoder = TransformerDecoder(
cfg.distributed_training, None, None, decoder_module_list=decoder_module_list
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1,
                            help='Number of embedding layer chunks (enables more even distribution '
                                 'of optimizer states across data parallel nodes '
                                 'when using optimizer state sharding and '
                                 'a big embedding vocabulary)')
# fmt: on
@classmethod
def build_model_base(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, "max_source_positions"):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, "max_target_positions"):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1):
assert embed_dim % num_embed_chunks == 0, (
f"Number of embedding chunks = {num_embed_chunks} should be "
+ f"divisible by the embedding dimension = {embed_dim}"
)
assert path is None or num_embed_chunks == 1, (
"Loading embedding from a path with number of embedding chunks > 1"
+ " is not yet supported"
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
# if provided, load from preloaded dictionaries
if path:
emb = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
else:
embed_chunk_dim = embed_dim // num_embed_chunks
emb = nn.ModuleList()
for i in range(num_embed_chunks):
emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx))
return emb
num_embed_chunks = args.num_embedding_chunks
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_embed_path,
num_embed_chunks,
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
assert args.share_decoder_input_output_embed or num_embed_chunks == 1, (
"Not sharing decoder I/O embeddings is not yet supported with number of "
+ "embedding chunks > 1"
)
encoder_embed_tokens = build_embedding(
src_dict,
args.encoder_embed_dim,
args.encoder_embed_path,
num_embed_chunks,
)
decoder_embed_tokens = build_embedding(
tgt_dict,
args.decoder_embed_dim,
args.decoder_embed_path,
num_embed_chunks,
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return (encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
@classmethod
def build_model(cls, args, task):
encoder, decoder = cls.build_model_base(args, task)
return PipelineParallelTransformerModel(
encoder=encoder,
decoder=decoder,
balance=utils.eval_str_list(args.pipeline_balance, type=int),
devices=utils.eval_str_list(args.pipeline_devices, type=int),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder_max_positions, self.decoder_max_positions)
def max_positions_helper(
self, embedding_layer, max_positions_field="max_source_positions"
):
"""Maximum input length supported by the encoder or decoder."""
if embedding_layer.embed_positions is None:
return getattr(embedding_layer, max_positions_field)
return min(
getattr(embedding_layer, max_positions_field),
embedding_layer.embed_positions.max_positions,
)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
if sample is not None:
assert "target" in sample
target = sample["target"]
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output, target=target)
return out.exp_() if not log_probs else out
# A Pipe() module returns a tuple of tensors as the output.
# In this case, the tuple has one element - the output tensor of logits
logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=False)
else:
return utils.softmax(logits, dim=-1, onnx_trace=False)
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder_max_positions
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
is_regular_transformer = not any("model.partitions" in k for k in state_dict)
if is_regular_transformer:
state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict)
return super().load_state_dict(state_dict, strict)
def convert_to_pipeline_parallel_state_dict(self, state_dict):
new_state_dict = self.state_dict()
encoder_layer_idx = 0
decoder_layer_idx = 0
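        # Key-mapping sketch (illustrative; the exact partition/module indices depend
        # on --pipeline-balance and --pipeline-devices): assuming the encoder embedding
        # lands as module 0 of partition 0 and the encoder layers follow in order,
        #   'encoder.layers.0.fc1.weight' -> 'model.partitions.0.1.fc1.weight'
        #   'decoder.embed_tokens.weight' -> 'model.partitions.<p>.<m>.embed_tokens.weight'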
encoder_key_suffixes = [
"self_attn.k_proj.weight",
"self_attn.k_proj.bias",
"self_attn.v_proj.weight",
"self_attn.v_proj.bias",
"self_attn.q_proj.weight",
"self_attn.q_proj.bias",
"self_attn.out_proj.weight",
"self_attn.out_proj.bias",
"self_attn_layer_norm.weight",
"self_attn_layer_norm.bias",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"final_layer_norm.weight",
"final_layer_norm.bias",
]
decoder_key_suffixes = [
"self_attn.k_proj.weight",
"self_attn.k_proj.bias",
"self_attn.v_proj.weight",
"self_attn.v_proj.bias",
"self_attn.q_proj.weight",
"self_attn.q_proj.bias",
"self_attn.out_proj.weight",
"self_attn.out_proj.bias",
"self_attn_layer_norm.weight",
"self_attn_layer_norm.bias",
"encoder_attn.k_proj.weight",
"encoder_attn.k_proj.bias",
"encoder_attn.v_proj.weight",
"encoder_attn.v_proj.bias",
"encoder_attn.q_proj.weight",
"encoder_attn.q_proj.bias",
"encoder_attn.out_proj.weight",
"encoder_attn.out_proj.bias",
"encoder_attn_layer_norm.weight",
"encoder_attn_layer_norm.bias",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"final_layer_norm.weight",
"final_layer_norm.bias",
]
for pid, partition in enumerate(self.model.partitions):
logger.info(f"Begin Partition {pid}")
for mid, module in enumerate(partition):
# fmt: off
if isinstance(module, TransformerEncoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor']
if isinstance(module, TransformerEncoderLayer):
for suffix in encoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}']
encoder_layer_idx += 1
if isinstance(module, TransformerDecoderLayer):
for suffix in decoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}']
decoder_layer_idx += 1
if isinstance(module, TransformerEncoderLayerNorm):
if 'encoder.layer_norm.weight' in state_dict:
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias']
if isinstance(module, TransformerDecoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor']
if isinstance(module, TransformerDecoderOutputLayer):
new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight']
# fmt: on
return new_state_dict
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
import_pipe()
self.use_pipeline = encoder_module_list is not None
if not self.use_pipeline:
self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens)
self.encoder_layers = nn.Sequential(*[TransformerEncoderLayer(args) for i in range(args.encoder_layers)])
if isinstance(embed_tokens, nn.ModuleList):
emb_dim = sum(e.embedding_dim for e in embed_tokens)
else:
emb_dim = embed_tokens.embedding_dim
self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim)
else:
encoder_balance = utils.eval_str_list(
args.pipeline_encoder_balance, type=int
)
encoder_devices = utils.eval_str_list(
args.pipeline_encoder_devices, type=int
)
assert sum(encoder_balance) == len(encoder_module_list), (
f"Sum of encoder_balance={encoder_balance} is not equal "
+ f"to num_encoder_modules={len(encoder_module_list)}"
)
if TORCH_PIPE:
self.model = Pipe(
module=partition_model(nn.Sequential(*encoder_module_list), encoder_balance, encoder_devices),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
else:
self.model = Pipe(
module=nn.Sequential(*encoder_module_list),
balance=encoder_balance,
devices=encoder_devices,
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def forward(self, src_tokens, src_lengths):
"""
Args:
input_tuple(
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
)
Returns:
output_tuple(
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- prev_output_tokens
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
)
"""
dummy_prev_output_tokens = torch.zeros(
1, dtype=src_tokens.dtype, device=src_tokens.device
)
input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens)
if self.use_pipeline:
input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
if TORCH_PIPE:
encoder_out = self.model(input_tuple).local_value()
else:
encoder_out = self.model(input_tuple)
else:
encoder_embed_output_tuple = self.embedding_layer(input_tuple)
encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple)
encoder_out = self.final_layer_norm(encoder_layers_output)
# first element is the encoder output
# second element is the encoder padding mask
# the remaining elements of EncoderOut are not computed by
# the PipelineParallelTransformer
return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out.encoder_out is not None:
encoder_out = encoder_out._replace(
encoder_out=encoder_out.encoder_out.index_select(1, new_order)
)
if encoder_out.encoder_padding_mask is not None:
encoder_out = encoder_out._replace(
encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(
0, new_order
)
)
if encoder_out.encoder_embedding is not None:
encoder_out = encoder_out._replace(
encoder_embedding=encoder_out.encoder_embedding.index_select(
0, new_order
)
)
if encoder_out.encoder_states is not None:
for idx, state in enumerate(encoder_out.encoder_states):
encoder_out.encoder_states[idx] = state.index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_source_positions
return min(
self.embedding_layer.max_source_positions,
self.embedding_layer.embed_positions.max_positions,
)
class TransformerDecoder(FairseqDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
decoder_module_list=None,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
import_pipe()
self.use_pipeline = decoder_module_list is not None
if not self.use_pipeline:
self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens)
self.decoder_layers = nn.Sequential(*[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
self.decoder_output_layer = TransformerDecoderOutputLayer(
args, embed_tokens, dictionary
)
else:
decoder_balance = utils.eval_str_list(
args.pipeline_decoder_balance, type=int
)
decoder_devices = utils.eval_str_list(
args.pipeline_decoder_devices, type=int
)
assert sum(decoder_balance) == len(decoder_module_list), (
f"Sum of decoder_balance={decoder_balance} is not equal "
+ f"to num_decoder_modules={len(decoder_module_list)}"
)
if TORCH_PIPE:
self.model = Pipe(
module=partition_model(nn.Sequential(*decoder_module_list), decoder_balance, decoder_devices),
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
else:
self.model = Pipe(
module=nn.Sequential(*decoder_module_list),
balance=decoder_balance,
devices=decoder_devices,
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def forward(
self,
prev_output_tokens,
encoder_out=None,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
input_tuple = (
encoder_out.encoder_out,
encoder_out.encoder_padding_mask,
prev_output_tokens,
)
if self.use_pipeline:
input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
if TORCH_PIPE:
return (self.model(input_tuple).local_value(),)
else:
return (self.model(input_tuple),)
else:
embed_layer_output = self.embedding_layer(input_tuple)
state = self.decoder_layers(embed_layer_output)
return (self.decoder_output_layer(state),)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_target_positions
return min(
self.embedding_layer.max_target_positions,
self.embedding_layer.embed_positions.max_positions,
)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
@register_model_architecture(
"pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel"
)
def transformer_iwslt_de_en_dist(args):
transformer_iwslt_de_en(args)
@register_model_architecture(
"pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel"
)
def transformer_wmt_en_de_big_dist(args):
transformer_wmt_en_de_big(args)
|
COCO-LM/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py",
"repo_id": "COCO-LM",
"token_count": 16338
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import utils
from torch import Tensor
class FairseqDecoder(nn.Module):
"""Base class for decoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
self.onnx_trace = False
self.adaptive_softmax = None
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def output_layer(self, features, **kwargs):
"""
Project features to the default output size, e.g., vocabulary size.
Args:
features (Tensor): features returned by *extract_features*.
"""
raise NotImplementedError
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
if sample is not None:
assert "target" in sample
target = sample["target"]
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
return out.exp_() if not log_probs else out
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
else:
return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
def max_positions(self):
"""Maximum input length supported by the decoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def prepare_for_onnx_export_(self):
self.onnx_trace = True
|
COCO-LM/fairseq/fairseq/models/fairseq_decoder.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/fairseq_decoder.py",
"repo_id": "COCO-LM",
"token_count": 1588
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
|
COCO-LM/fairseq/fairseq/models/nat/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/__init__.py",
"repo_id": "COCO-LM",
"token_count": 137
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unsupervised Cross-lingual Representation Learning at Scale
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("xlmr")
class XLMRModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz",
"xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
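
# Example usage (illustrative; requires downloading a released checkpoint):
#   xlmr = XLMRModel.from_pretrained("xlmr.base", checkpoint_file="model.pt")
#   xlmr.eval()
#   tokens = xlmr.encode("Hello world!")        # sentencepiece encoding
#   features = xlmr.extract_features(tokens)    # (1, seq_len, hidden_dim)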
|
COCO-LM/fairseq/fairseq/models/roberta/model_xlmr.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/roberta/model_xlmr.py",
"repo_id": "COCO-LM",
"token_count": 559
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
SamePad,
TransposeLast,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
        default=0.0, metadata={"help": "probability of dropping a transformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
        metadata={
            "help": "project final representations and targets to this many dimensions. "
            "set to encoder_embed_dim if <= 0"
        },
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=False, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
        metadata={
            "help": "secondary mask argument (used for more complex distributions), "
            "see help in compute_mask_indices"
        },
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
        default=0, metadata={"help": "number of negative examples from any sample"}
)
codebook_negatives: int = field(
        default=0, metadata={"help": "number of negative examples from the codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
@register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2Config):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if cfg.same_quantizer and self.quantizer is not None:
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
self.input_quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2Config, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self, x, padding_mask,
mask_indices=None, mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz,tsz,fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits = logits / self.logit_temp
if is_xla_tensor(logits) or neg_is_pos.any():
fillval = -float(2**30)
if not hasattr(self, '_inftensor'):
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits) else
float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
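    # Layout note (illustrative): `targets` stacks the positive pair first, so the
    # returned logits have shape (1 + num_negatives, batch, time) with row 0 holding
    # the positive score; get_targets() below therefore returns all-zero class
    # indices, and duplicated negatives (neg_is_pos) are filled with -inf (or a large
    # negative constant on XLA) so they cannot win the contrastive softmax.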
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
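        # Illustrative example with the default first conv layer (512, 10, 5):
        # 16000 input samples give floor((16000 - 10) / 5 + 1) = 3199 output frames;
        # the loop below applies this formula once per convolutional layer.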
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2])
return input_lengths.to(torch.long)
def forward(
self, source, padding_mask=None, mask=True, features_only=False,
mask_indices=None, mask_channel_indices=None,
padding_count=None,
):
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
# these two operations makes sure that all values
# before the output lengths indices are attended to
padding_mask[(torch.arange(padding_mask.shape[0], device=padding_mask.device), output_lengths - 1)] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
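            # Illustrative example: with 5 feature frames and an output length of 2,
            # the one-hot row [0, 1, 0, 0, 0] becomes [1, 1, 0, 0, 0] after
            # flip/cumsum/flip, so (1 - that) marks frames 2..4 as padding.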
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
if self.input_quantizer:
q = self.input_quantizer(features, produce_targets=False)
features = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
features = self.project_inp(features)
if mask:
x, mask_indices = self.apply_mask(
features, padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
if not is_xla_tensor(x) and mask_indices is not None:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y = unmasked_features[mask_indices].view(
unmasked_features.size(0), -1, unmasked_features.size(-1)
)
else:
y = unmasked_features
else:
x = features
y = unmasked_features
mask_indices = None
x = self.encoder(x, padding_mask=padding_mask)
if features_only:
return {"x": x, "padding_mask": padding_mask}
if self.quantizer:
q = self.quantizer(y, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
if self.negatives_from_everywhere:
neg_cands = self.quantizer(
unmasked_features, produce_targets=False
)["x"]
negs, _ = self.sample_negatives(
neg_cands, y.size(1), padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y, y.size(1), padding_count=padding_count,
)
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                ) # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features, y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y, y.size(1), padding_count=padding_count,
)
if not is_xla_tensor(x):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x = x[mask_indices].view(x.size(0), -1, x.size(-1))
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
x = self.final_proj(x)
x = self.compute_preds(x, y, negs)
result = {
"x": x, "padding_mask": padding_mask, "features_pen": features_pen,
}
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_features(self, source, padding_mask, mask=False):
res = self.forward(source, padding_mask, mask=mask, features_only=True)
return res["x"], res["padding_mask"]
def get_logits(self, net_output):
logits = net_output["x"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_targets(self, sample, net_output, expand_steps=True):
x = net_output["x"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
class TransformerEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
self.layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
)
for _ in range(args.encoder_layers)
]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None):
x = self.extract_features(x, padding_mask)
if self.layer_norm_first:
x = self.layer_norm(x)
return x
def extract_features(self, x, padding_mask=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
layer_results.append(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=need_weights,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
|
COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec2.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec2.py",
"repo_id": "COCO-LM",
"token_count": 16096
}
| 191 |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <torch/extension.h>
#include <vector>
std::vector<at::Tensor> dynamicconv_cuda_forward(
at::Tensor input,
at::Tensor filters,
int padding_l);
std::vector<at::Tensor> dynamicconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters);
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::vector<at::Tensor> dynamicconv_forward(
at::Tensor input,
at::Tensor filters,
int padding_l) {
CHECK_INPUT(input);
CHECK_INPUT(filters);
return dynamicconv_cuda_forward(input, filters,
padding_l);
}
std::vector<at::Tensor> dynamicconv_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
CHECK_INPUT(gradOutput);
CHECK_INPUT(input);
CHECK_INPUT(filters);
return dynamicconv_cuda_backward(gradOutput, padding_l,
input, filters);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)");
m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)");
}
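// Build note (illustrative): this file is compiled together with the CUDA kernels as
// a PyTorch C++/CUDA extension (e.g. via torch.utils.cpp_extension.CUDAExtension in
// the accompanying setup.py) and imported from Python as the `dynamicconv_cuda` module.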
|
COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp",
"repo_id": "COCO-LM",
"token_count": 597
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
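    # Structure of the generated file (for orientation): an if/else chain over the
    # sequence-length buckets in `seqs`, a switch over the filter sizes in `kernels`,
    # and, per (filter, bucket) pair, one template instantiation for each supported
    # padding (k // 2 and k - 1); anything else falls through to the warning branches.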
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = filters.size(0);
const auto filterSize = filters.size(1);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{
lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
final_return = """
}
return {output};
}
"""
with open("lightconv_cuda_forward.cu", "w") as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
def gen_backward():
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
// gradWrtInput
const int minibatch = input.size(0);
const int numFeatures = input.size(1);
const int sequenceLength = input.size(2);
const int numHeads = filters.size(0);
const int filterSize = filters.size(1);
const dim3 gradBlocks(minibatch, numFeatures);
const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);
const dim3 weightGradSecondpassBlocks(numHeads, filterSize);
const int numFiltersInBlock = numFeatures / numHeads;
auto gradInput = at::zeros_like(input);
auto gradFilters = at::zeros_like(filters);
at::DeviceGuard g(input.device());
auto stream = at::cuda::getCurrentCUDAStream();
switch(filterSize) {
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{
lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
gradInput.data<scalar_t>());
"""
weight_grad_short = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>
<<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
weight_grad = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
"""
breakout = """
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradFilters};
}
"""
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, -1, -1, -1]
max_mem = [-1, -1, -1, -1, -1, 192, 96, 64]
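    # `thresh` caps the sequence lengths that get a dedicated per-length kernel for each
    # filter size; `max_mem` caps the remaining cases (presumably to bound per-block
    # resource use). Past either limit the generator emits one generic instantiation
    # with block size 32 (the else branch below) and stops iterating over longer buckets.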
with open("lightconv_cuda_backward.cu", "w") as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (t == -1 or seq <= t) and (mem == -1 or seq < mem):
backward.write(sequence_if.format(seq=seq))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
|
COCO-LM/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py",
"repo_id": "COCO-LM",
"token_count": 5202
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQEmbedding(nn.Module):
"""
Quantized counterpart of nn.Embedding module. Stores the centroids and
the assignments. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Embedding module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Embedding module for a standard training loop.
"""
def __init__(
self,
centroids,
assignments,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
):
super(PQEmbedding, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
# check compatibility
if self.embedding_dim % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.num_embeddings != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.num_embeddings, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
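    # Shape walk-through (illustrative): with num_embeddings=1000, embedding_dim=512
    # and block_size=8 there are 512 // 8 = 64 blocks per embedding, so `assignments`
    # holds 64 * 1000 indices; centroids[assignments] is (64000, 8), reshaped to
    # (64, 1000, 8), permuted to (1000, 64, 8) and flattened back into the full
    # (1000, 512) weight matrix on every forward pass.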
def forward(self, input):
return F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
|
COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qemb.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qemb.py",
"repo_id": "COCO-LM",
"token_count": 1746
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from .multihead_attention import MultiheadAttention
class SparseMultiheadAttention(MultiheadAttention):
"""Sparse Multi-Headed Attention.
"Generating Long Sequences with Sparse Transformers". Implements
fixed factorized self attention, where l=stride and c=expressivity.
A(1) includes all words in the stride window and A(2) takes a summary of c
words from the end of each stride window.
If is_bidirectional=False, we do not include any words past the current word,
as in the paper.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
stride=32,
expressivity=8,
is_bidirectional=True,
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
)
self.is_bidirectional = is_bidirectional
self.stride = stride
self.expressivity = expressivity
assert self.stride > 0 and self.stride >= self.expressivity
# Used for Ai(2) calculations - beginning of [l-c, l] range
def compute_checkpoint(self, word_index):
if word_index % self.stride == 0 and word_index != 0:
checkpoint_index = word_index - self.expressivity
else:
checkpoint_index = (
math.floor(word_index / self.stride) * self.stride
+ self.stride
- self.expressivity
)
return checkpoint_index
# Computes Ai(2)
def compute_subset_summaries(self, absolute_max):
checkpoint_index = self.compute_checkpoint(0)
subset_two = set()
while checkpoint_index <= absolute_max - 1:
summary = set(
range(
checkpoint_index,
min(checkpoint_index + self.expressivity + 1, absolute_max),
)
)
subset_two = subset_two.union(summary)
checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride)
return subset_two
# Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf
def compute_fixed_attention_subset(self, word_index, tgt_len):
# +1s account for range function; [min, max) -> [min, max]
if not self.is_bidirectional:
absolute_max = word_index + 1
else:
absolute_max = tgt_len
# Subset 1 - whole window
rounded_index = (
math.floor((word_index + self.stride) / self.stride) * self.stride
)
if word_index % self.stride == 0 and word_index != 0:
subset_one = set(
range(word_index - self.stride, min(absolute_max, word_index + 1))
)
else:
subset_one = set(
range(
max(0, rounded_index - self.stride),
min(absolute_max, rounded_index + 1),
)
)
# Subset 2 - summary per window
# If bidirectional, subset 2 is the same for every index
subset_two = set()
if not self.is_bidirectional:
subset_two = self.compute_subset_summaries(absolute_max)
return subset_one.union(subset_two)
# Compute sparse mask - if bidirectional, can pre-compute and store
def buffered_sparse_mask(self, tensor, tgt_len, src_len):
assert tgt_len > self.stride
sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf"))
# If bidirectional, subset 2 is the same for every index
subset_summaries = set()
if self.is_bidirectional:
subset_summaries = self.compute_subset_summaries(tgt_len)
for i in range(tgt_len):
fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
included_word_indices = torch.LongTensor(list(fixed_attention_subset))
sparse_mask[i].index_fill_(0, included_word_indices, 0)
return sparse_mask.type_as(tensor)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
sparse_mask = sparse_mask.unsqueeze(0).expand(
bsz * self.num_heads, tgt_len, src_len
)
attn_weights += sparse_mask
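# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Builds the fixed sparse pattern for a short sequence so the stride/expressivity
# interaction can be inspected directly; the sizes below are arbitrary assumptions.
if __name__ == "__main__":
    attn = SparseMultiheadAttention(
        embed_dim=16, num_heads=2, self_attention=True, stride=4, expressivity=2
    )
    mask = attn.buffered_sparse_mask(torch.zeros(1), tgt_len=8, src_len=8)
    # 1 marks pairs allowed to attend, 0 marks the -inf entries of the mask
    print((mask == 0).long())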
|
COCO-LM/fairseq/fairseq/modules/sparse_multihead_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/sparse_multihead_attention.py",
"repo_id": "COCO-LM",
"token_count": 2293
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adamax")
class FairseqAdamax(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adamax(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--no-bias-correction', default=False, action='store_true',
help='disable bias correction')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"betas": eval(self.args.adamax_betas),
"eps": self.args.adamax_eps,
"weight_decay": self.args.weight_decay,
"bias_correction": not self.args.no_bias_correction,
}
class Adamax(torch.optim.Optimizer):
"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
It has been proposed in `Adam: A Method for Stochastic Optimization`__.
Compared to the version in PyTorch, this version implements a fix for weight decay.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
bias_correction (bool, optional): enable bias correction (default: True)
__ https://arxiv.org/abs/1412.6980
"""
def __init__(
self,
params,
lr=2e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
bias_correction=True,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
)
super(Adamax, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError("Adamax does not support sparse gradients")
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_inf"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_inf"] = state["exp_inf"].to(p_data_fp32)
exp_avg, exp_inf = state["exp_avg"], state["exp_inf"]
beta1, beta2 = group["betas"]
eps = group["eps"]
state["step"] += 1
# Update biased first moment estimate.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# Update the exponentially weighted infinity norm.
torch.max(
exp_inf.mul_(beta2),
grad.abs_(),
out=exp_inf,
)
step_size = group["lr"]
if group["bias_correction"]:
bias_correction = 1 - beta1 ** state["step"]
step_size /= bias_correction
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
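# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A minimal training step with the fixed-weight-decay Adamax defined above; the tiny
# model and random batch are assumptions made only to exercise the optimizer.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    optimizer = Adamax(model.parameters(), lr=2e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()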
|
COCO-LM/fairseq/fairseq/optim/adamax.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/adamax.py",
"repo_id": "COCO-LM",
"token_count": 3023
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional, List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class PolynomialDecayLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
force_anneal: Optional[int] = field(
default=None,
metadata={"help": "force annealing at specified epoch"},
)
end_learning_rate: float = field(
default=0.0,
metadata={"help": "learning rate to decay to"},
)
power: float = field(
default=1.0,
metadata={"help": "decay exponent"},
)
total_num_update: float = field(
default=II("optimization.max_update"),
metadata={"help": "total number of updates over which to decay learning rate"},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig)
class PolynomialDecayLRSchedule(FairseqLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
assert cfg.total_num_update > 0
self.lr = cfg.lr[0]
if cfg.warmup_updates > 0:
self.warmup_factor = 1.0 / cfg.warmup_updates
else:
self.warmup_factor = 1
self.end_learning_rate = cfg.end_learning_rate
self.total_num_update = cfg.total_num_update
self.power = cfg.power
self.optimizer.set_lr(self.warmup_factor * self.lr)
def get_next_lr(self, epoch):
lrs = self.cfg.lr
if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
            # anneal based on lr_shrink
next_lr = self.optimizer.get_lr()
return next_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.cfg.warmup_updates > 0 and num_updates <= self.cfg.warmup_updates:
self.warmup_factor = num_updates / float(self.cfg.warmup_updates)
lr = self.warmup_factor * self.lr
elif num_updates >= self.total_num_update:
lr = self.end_learning_rate
else:
warmup = self.cfg.warmup_updates
lr_range = self.lr - self.end_learning_rate
pct_remaining = 1 - (num_updates - warmup) / (
self.total_num_update - warmup
)
lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate
self.optimizer.set_lr(lr)
return self.optimizer.get_lr()
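# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Stand-alone reproduction of the schedule implemented in step_update above, useful for
# inspecting the curve without constructing a fairseq optimizer; the name is hypothetical.
def _poly_decay_lr(num_updates, base_lr, warmup_updates, total_updates, end_lr=0.0, power=1.0):
    if warmup_updates > 0 and num_updates <= warmup_updates:
        return base_lr * num_updates / float(warmup_updates)
    if num_updates >= total_updates:
        return end_lr
    pct_remaining = 1 - (num_updates - warmup_updates) / (total_updates - warmup_updates)
    return (base_lr - end_lr) * pct_remaining ** power + end_lr
# e.g. _poly_decay_lr(500, 5e-4, 1000, 10000) == 2.5e-4 (still in linear warmup)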
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py",
"repo_id": "COCO-LM",
"token_count": 1430
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional
import torch
import torch.nn as nn
from fairseq.token_generation_constraints import (
ConstraintState,
OrderedConstraintState,
UnorderedConstraintState,
)
from torch import Tensor
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = False
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
prev_output_tokens: (bsz x step)
                the previously generated output tokens
original_batch_idxs: (bsz)
the tensor with the batch indices, in the range [0, bsz)
this is useful in case there has been applied a re-ordering
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
        the set of 2 * {beam_size} candidate hypotheses is reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
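# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A single step-0 beam expansion over random log-probabilities; the toy dictionary and
# shapes are assumptions made only to show the (scores, indices, beams) contract.
if __name__ == "__main__":
    from fairseq.data import Dictionary
    toy_dict = Dictionary()
    for sym in "abcde":
        toy_dict.add_symbol(sym)
    search = BeamSearch(toy_dict)
    lprobs = torch.randn(2, 4, len(toy_dict)).log_softmax(-1)  # (bsz, beam, vocab)
    scores, indices, beams = search.step(0, lprobs, scores=None)
    # each output has shape (bsz, up to 2 * beam); beams are all 0 at step 0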
class PrefixConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, prefix_allowed_tokens_fn):
super().__init__(tgt_dict)
self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self.stop_on_max_len = True
@torch.jit.export
def apply_mask(self, x, prev_output_tokens, original_batch_idxs):
beam_size = x.shape[0] // original_batch_idxs.shape[0]
original_batch_idxs = (
original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist()
)
mask = torch.full_like(x, -math.inf)
for sent_i, (sent, batch_i) in enumerate(
zip(prev_output_tokens, original_batch_idxs)
):
mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0
return mask
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Tensor,
prev_output_tokens: Tensor,
original_batch_idxs: Tensor,
):
bsz, beam_size, vocab_size = lprobs.size()
lprobs += self.apply_mask(
lprobs.view(bsz * beam_size, 1, vocab_size),
prev_output_tokens,
original_batch_idxs,
).view(bsz, beam_size, vocab_size)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
return scores_buf, indices_buf, beams_buf
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
            if self.representation == "ordered":
                constraint_state = OrderedConstraintState.create(constraint_tensor)
            elif self.representation == "unordered":
                constraint_state = UnorderedConstraintState.create(constraint_tensor)
            else:
                raise ValueError(
                    "Unknown constraint representation: {}".format(self.representation)
                )
            self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [
self.constraint_states[i] for i in batch_idxs.tolist()
]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [
self.constraint_states[sentid][i] for i in active_hypos[sentid]
]
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
        Return: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[
not_finished_indices, self.eos
] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(
step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone(),
)
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(
self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor,
):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = (
torch.tensor(beamno, device=device)
.repeat(next_tokens.size(0))
.long()
)
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [
constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)
]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[: self.num_cands]
indices_buf = indices_buf[: self.num_cands]
beams_buf = beams_buf[: self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
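# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Group-wise expansion with the Hamming diversity penalty described above; beam size 4
# split into 2 groups is an arbitrary assumption (beam must be divisible by num_groups).
if __name__ == "__main__":
    from fairseq.data import Dictionary
    toy_dict = Dictionary()
    for sym in "abcde":
        toy_dict.add_symbol(sym)
    search = DiverseBeamSearch(toy_dict, num_groups=2, diversity_strength=0.5)
    lprobs = torch.randn(2, 4, len(toy_dict)).log_softmax(-1)  # (bsz, beam, vocab)
    scores, indices, beams = search.step(0, lprobs, scores=None)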
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
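# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Step-0 nucleus (top-p) sampling with the class above; the toy dictionary and the
# 0.9 threshold are assumptions chosen only to exercise _sample_topp.
if __name__ == "__main__":
    from fairseq.data import Dictionary
    toy_dict = Dictionary()
    for sym in "abcdefgh":
        toy_dict.add_symbol(sym)
    sampler = Sampling(toy_dict, sampling_topp=0.9)
    lprobs = torch.randn(2, 3, len(toy_dict)).log_softmax(-1)  # (bsz, beam, vocab)
    scores, indices, beams = sampler.step(0, lprobs, scores=None)  # beam_size samples per sentence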
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
    If diversity_rate == 0, this is equivalent to BeamSearch.
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
# 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
|
COCO-LM/fairseq/fairseq/search.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/search.py",
"repo_id": "COCO-LM",
"token_count": 14454
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("sentence_ranking")
class SentenceRankingTask(LegacyFairseqTask):
"""
Ranking task on multiple sentences.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", metavar="FILE", help="file prefix for data")
parser.add_argument(
"--num-classes", type=int, help="number of sentences to be ranked"
)
parser.add_argument(
"--init-token",
type=int,
help="add token at the beginning of each batch item",
)
parser.add_argument(
"--separator-token", type=int, help="add separator token between inputs"
)
parser.add_argument("--no-shuffle", action="store_true")
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--max-option-length", type=int, help="max length for each option"
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, "input0", "dict.txt"),
source=True,
)
logger.info("[input] dictionary: {} types".format(len(data_dict)))
return SentenceRankingTask(args, data_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset("input0", self.source_dictionary)
input_options = [
make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
for idx in range(self.args.num_classes)
]
if self.args.separator_token is not None:
input0 = PrependTokenDataset(input0, self.args.separator_token)
src_tokens = []
for input_option in input_options:
if self.args.init_token is not None:
input_option = PrependTokenDataset(input_option, self.args.init_token)
if self.args.max_option_length is not None:
input_option = TruncateDataset(
input_option, self.args.max_option_length
)
src_token = ConcatSentencesDataset(input_option, input0)
src_token = maybe_shorten_dataset(
src_token,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.max_positions,
self.args.seed,
)
src_tokens.append(src_token)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens[0]))
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for src_token_idx in range(len(src_tokens)):
dataset.update(
{
"net_input{idx}".format(idx=src_token_idx + 1): {
"src_tokens": RightPadDataset(
src_tokens[src_token_idx],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(
src_tokens[src_token_idx], reduce=False
),
}
}
)
label_path = "{}.label".format(get_path("label", split))
if os.path.exists(label_path):
with open(label_path) as h:
dataset.update(
target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, "ranking_head_name", "sentence_classification_head"),
num_classes=1,
)
return model
def max_positions(self):
return self.args.max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
|
COCO-LM/fairseq/fairseq/tasks/sentence_ranking.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/sentence_ranking.py",
"repo_id": "COCO-LM",
"token_count": 3408
}
| 199 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from omegaconf import DictConfig
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(
target_tokens,
cfg.common_eval.post_process,
escape_unk=True,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(
generator
),
)
src_str = decode_fn(src_str)
if has_target:
target_str = decode_fn(target_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
if not cfg.common_eval.quiet:
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print(
"H-{}\t{}\t{}".format(sample_id, score, hypo_str),
file=output_file,
)
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
print(
"P-{}\t{}".format(
sample_id,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"]
.div_(math.log(2))
.tolist(),
)
),
),
file=output_file,
)
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
",".join(src_probs)
for src_probs in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_step:
print(
"I-{}\t{}".format(sample_id, hypo["steps"]),
file=output_file,
)
if cfg.generation.retain_iter_history:
for step, h in enumerate(hypo["history"]):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens=h["tokens"].int().cpu(),
src_str=src_str,
alignment=None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=None,
)
print(
"E-{}_{}\t{}".format(sample_id, step, h_str),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or cfg.common_eval.post_process is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(
target_str, add_if_not_exist=True
)
hypo_tokens = tgt_dict.encode_line(
detok_hypo_str, add_if_not_exist=True
)
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer
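# --- Illustrative sketch (added for exposition; not part of the original script) ---
# _main writes one "D-<id>\t<score>\t<detokenized hypothesis>" line per n-best entry; a
# minimal reader that groups those detokenized hypotheses by sentence id could look like:
def read_detok_hypotheses(path):
    hypos = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.startswith("D-"):
                tag, score, text = line.rstrip("\n").split("\t", 2)
                hypos.setdefault(int(tag[2:]), []).append((float(score), text))
    return hypos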
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/fairseq_cli/generate.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq_cli/generate.py",
"repo_id": "COCO-LM",
"token_count": 8440
}
| 200 |
#include <torch/extension.h>
// CUDA forward declarations
std::vector<at::Tensor> softmax_xentropy_cuda(
const at::Tensor &input,
const at::Tensor &labels,
const bool half_to_float);
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels);
// C++ interface
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::vector<at::Tensor> softmax_xentropy_forward(
const at::Tensor &input,
const at::Tensor &labels,
const bool half_to_float) {
CHECK_CUDA(input);
CHECK_INPUT(labels);
return softmax_xentropy_cuda(input, labels, half_to_float);
}
at::Tensor softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels) {
CHECK_CUDA(grad_loss);
CHECK_CUDA(logits);
CHECK_INPUT(max_log_sum_exp);
CHECK_INPUT(labels);
return softmax_xentropy_backward_cuda(grad_loss, logits, max_log_sum_exp, labels);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &softmax_xentropy_forward, "Softmax cross entropy loss forward (CUDA)");
m.def("backward", &softmax_xentropy_backward, "Softmax cross entropy loss backward (CUDA)");
}
|
COCO-LM/fairseq/fused_ops/csrc/xentropy/interface.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/xentropy/interface.cpp",
"repo_id": "COCO-LM",
"token_count": 632
}
| 201 |
#!/usr/bin/env bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Path to pretrained COCO-LM checkpoints
PRETRAINED_MODEL_PATH=$1
# Path to processed SQuAD 2.0 dataset (containing pickle files) 'path/to/squad2_data'
DATA_DIR=$2
# Output path for results and fine-tuned model
OUTPUT_PATH=$3
# Set pretrained model name, from ['cocolm_base', 'cocolm_large']
ARCH=$4
# Set the hyperparameters for the run
N_EPOCH=$5
WARMUP_RATIO=$6
BSZ=$7
LR=$8
SEED=$9
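# Example invocation (hypothetical paths and hyperparameter values, for illustration only):
#   bash run_squad.sh checkpoints/cocolm_base.pt /path/to/squad2_data /path/to/output cocolm_base 2 10 32 3e-5 1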
if [ "$ARCH" = "cocolm_base" ]
then
BINS=64
MAX_DIST=128
else
BINS=128
MAX_DIST=256
fi
BETAS="(0.9,0.98)"
CLIP=0.0
WEIGHT_DECAY=0.01
if [ ! -e $PRETRAINED_MODEL_PATH ]; then
echo "Checkpoint doesn't exist"
exit 0
fi
EPOCH_ITER=8218
OPTION="--version-2-with-negative"
BSZ_EXPAND=$((BSZ/16))
EPOCH_ITER=$((EPOCH_ITER/BSZ_EXPAND))
TOTAL_STEPS=$((EPOCH_ITER*N_EPOCH))
WARMUP_STEPS=$((TOTAL_STEPS/WARMUP_RATIO))
VALIDATE_INTERVAL=$((EPOCH_ITER/2))
OUTPUT_PATH=$OUTPUT_PATH/$N_EPOCH-$WARMUP_RATIO-$BSZ-$LR-$SEED
mkdir -p $OUTPUT_PATH
echo $OUTPUT_PATH
if [ -e $OUTPUT_PATH/train_log.txt ]; then
if grep -q 'done training' $OUTPUT_PATH/train_log.txt && grep -q 'Loaded checkpoint' $OUTPUT_PATH/train_log.txt; then
echo "Training log existed"
exit 0
fi
fi
python train.py $DATA_DIR --num-workers 0 --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--restore-file $PRETRAINED_MODEL_PATH \
--max-positions 512 \
--max-sentences $BSZ \
--update-freq 1 \
--task squad \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--arch $ARCH \
--criterion squad $OPTION \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay $WEIGHT_DECAY --optimizer adam --adam-betas "$BETAS" --adam-eps 1e-06 \
--clip-norm $CLIP \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_STEPS --warmup-updates $WARMUP_STEPS \
--max-update $TOTAL_STEPS --seed $SEED --save-dir ./ --no-progress-bar --log-interval 100 --no-epoch-checkpoints --no-last-checkpoints \
--find-unused-parameters --skip-invalid-size-inputs-valid-test \
--best-checkpoint-metric loss --maximize-best-checkpoint-metric --rel-pos 1 --max-rel-pos $MAX_DIST --rel-pos-bins $BINS \
--bpe sentencepiece --sentencepiece-model $DATA_DIR/sp.model --vocab $DATA_DIR/dict.txt --validate-interval-updates $VALIDATE_INTERVAL | tee $OUTPUT_PATH/train_log.txt
|
COCO-LM/fairseq/run_squad.sh/0
|
{
"file_path": "COCO-LM/fairseq/run_squad.sh",
"repo_id": "COCO-LM",
"token_count": 1050
}
| 202 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for decoding"
)
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(l):
return "".join(sp.DecodePieces(l))
elif args.input_format == "id":
def decode(l):
return "".join(sp.DecodeIds(l))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
if args.input_format == "id":
print(decode(list(map(tok2int, line.rstrip().split()))))
elif args.input_format == "piece":
print(decode(line.rstrip().split()))
if __name__ == "__main__":
main()
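# Example invocation (illustrative; file names are placeholders):
#   python scripts/spm_decode.py --model sentencepiece.bpe.model \
#       --input generations.piece.txt --input_format piece > generations.detok.txt
# With --input_format id, each input line must contain space-separated integer ids;
# reference-side "<<unk>>" tokens are mapped to id 0 before decoding.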
|
COCO-LM/fairseq/scripts/spm_decode.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/spm_decode.py",
"repo_id": "COCO-LM",
"token_count": 601
}
| 203 |
#!/usr/bin/env python3
import argparse
import os
import unittest
from inspect import currentframe, getframeinfo
import numpy as np
import torch
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqModel,
)
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
    for i in range(vocab_size):
        dummy_dict.add_symbol("{}".format(i), 1000)
return dummy_dict
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.tgt_dict = self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_task_and_parser():
"""
to build a fariseq model, we need some dummy parse and task. This function
is used to create dummy task and parser to faciliate model/criterion test
Note: we use FbSpeechRecognitionTask as the dummy task. You may want
to use other task by providing another function
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def get_dummy_input(T=100, D=80, B=5, K=100):
forward_input = {}
# T max sequence length
# D feature vector dimension
# B batch size
# K target dimension size
feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention; you can override it by
    # writing your own _prepare_forward_input function
src_lengths = torch.from_numpy(
np.random.randint(low=1, high=T, size=B, dtype=np.int64)
)
src_lengths[0] = T # make sure the maximum length matches
prev_output_tokens = []
for b in range(B):
token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
prev_output_tokens.append(torch.from_numpy(tokens))
prev_output_tokens = fairseq_data_utils.collate_tokens(
prev_output_tokens,
pad_idx=1,
eos_idx=2,
left_pad=False,
move_eos_to_beginning=False,
)
src_lengths, sorted_order = src_lengths.sort(descending=True)
forward_input["src_tokens"] = feature.index_select(0, sorted_order)
forward_input["src_lengths"] = src_lengths
forward_input["prev_output_tokens"] = prev_output_tokens
return forward_input
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
"""
This only provides an example to generate dummy encoder output
"""
(T, B, D) = encoder_out_shape
encoder_out = {}
encoder_out["encoder_out"] = torch.from_numpy(
np.random.randn(*encoder_out_shape).astype(np.float32)
)
seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
# some dummy mask
encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
B, -1
) >= seq_lengths.view(B, 1).expand(-1, T)
encoder_out["encoder_padding_mask"].t_()
    # encoder_padding_mask is a (T, B) tensor whose (t, b)-th element indicates
    # whether encoder_out[t, b] is valid (=0/False) or padding (=1/True)
return encoder_out
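# Illustrative note on the mask convention above (not part of the original tests):
# for T=3, B=2 and sequence lengths [2, 3], the (B, T) mask is
# [[False, False, True], [False, False, False]]; after the in-place transpose it is
# stored as a (T, B) tensor in which True marks a padded (invalid) time step.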
def _current_postion_info():
cf = currentframe()
frameinfo = " (at {}:{})".format(
os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
)
return frameinfo
def check_encoder_output(encoder_output, batch_size=None):
"""we expect encoder_output to be a dict with the following
key/value pairs:
- encoder_out: a Torch.Tensor
- encoder_padding_mask: a binary Torch.Tensor
"""
if not isinstance(encoder_output, dict):
msg = (
"FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info()
)
return False, msg
if "encoder_out" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_out"
+ _current_postion_info()
)
return False, msg
if "encoder_padding_mask" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
+ _current_postion_info()
)
return False, msg
if not isinstance(encoder_output["encoder_out"], torch.Tensor):
msg = "encoder_out must be a torch.Tensor" + _current_postion_info()
return False, msg
if encoder_output["encoder_out"].dtype != torch.float32:
msg = "encoder_out must have float32 dtype" + _current_postion_info()
return False, msg
mask = encoder_output["encoder_padding_mask"]
if mask is not None:
if not isinstance(mask, torch.Tensor):
msg = (
"encoder_padding_mask must be a torch.Tensor" + _current_postion_info()
)
return False, msg
if mask.dtype != torch.uint8 and (
not hasattr(torch, "bool") or mask.dtype != torch.bool
):
msg = (
"encoder_padding_mask must have dtype of uint8"
+ _current_postion_info()
)
return False, msg
if mask.dim() != 2:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
+ _current_postion_info()
)
return False, msg
if batch_size is not None and mask.size(1) != batch_size:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
+ " being the batch size"
+ _current_postion_info()
)
return False, msg
return True, None
def check_decoder_output(decoder_output):
"""we expect output from a decoder is a tuple with the following constraint:
- the first element is a torch.Tensor
- the second element can be anything (reserved for future use)
"""
if not isinstance(decoder_output, tuple):
msg = "FariseqDecoder output must be a tuple" + _current_postion_info()
return False, msg
if len(decoder_output) != 2:
msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info()
return False, msg
if not isinstance(decoder_output[0], torch.Tensor):
msg = (
"FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info()
)
return False, msg
return True, None
# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////
class TestBaseFairseqModelBase(unittest.TestCase):
"""
This class is used to facilitate writing unittest for any class derived from
`BaseFairseqModel`.
"""
@classmethod
def setUpClass(cls):
if cls is TestBaseFairseqModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model):
self.assertTrue(isinstance(model, BaseFairseqModel))
self.model = model
def setupInput(self):
pass
def setUp(self):
self.model = None
self.forward_input = None
pass
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
"""
    base class for testing FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); concrete tests must be derived from this base class
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderDecoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
msg="This class only tests for FairseqModel subclasses",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
def setUp(self):
super().setUp()
def test_forward(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
# for FairseqEncoderDecoderModel, forward returns a tuple of two
# elements, the first one is a Torch.Tensor
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# in order for different models/criterion to play with each other
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
# or prob. If you find your code failed here, simply override
# FairseqModel.get_normalized_probs, see example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
"""
base class to test FairseqEncoderModel
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, FairseqEncoderModel),
msg="This class is only used for testing FairseqEncoderModel",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
super().setUp()
def test_forward(self):
if self.forward_input and self.model:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.model.forward(**self.forward_input)
# we expect forward_output to be a dict with the following
# key/value pairs:
# - encoder_out: a Torch.Tensor
# - encoder_padding_mask: a binary Torch.Tensor
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# in order for different models/criterion to play with each other
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
# or prob. If you find your code failed here, simply override
# FairseqModel.get_normalized_probs, see example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderBase(unittest.TestCase):
"""
base class to test FairseqEncoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpEncoder(self, encoder):
self.assertTrue(
isinstance(encoder, FairseqEncoder),
msg="This class is only used for test FairseqEncoder",
)
self.encoder = encoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
self.encoder = None
self.forward_input = None
def test_forward(self):
if self.encoder and self.forward_input:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.encoder.forward(**self.forward_input)
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
class TestFairseqDecoderBase(unittest.TestCase):
"""
base class to test FairseqDecoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqDecoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpDecoder(self, decoder):
self.assertTrue(
isinstance(decoder, FairseqDecoder),
msg="This class is only used for test FairseqDecoder",
)
self.decoder = decoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_encoder_output() if input is None else input
def setUpPrevOutputTokens(self, tokens=None):
if tokens is None:
self.encoder_input = get_dummy_input()
self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
else:
self.prev_output_tokens = tokens
def setUp(self):
self.decoder = None
self.forward_input = None
self.prev_output_tokens = None
def test_forward(self):
if (
self.decoder is not None
and self.forward_input is not None
and self.prev_output_tokens is not None
):
forward_output = self.decoder.forward(
prev_output_tokens=self.prev_output_tokens,
encoder_out=self.forward_input,
)
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
class DummyEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@classmethod
def build_model(cls, args, task):
return cls(DummyEncoder())
def get_logits(self, net_output):
# Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
# F.binary_cross_entropy_with_logits combines sigmoid and CE
return torch.log(
torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)
lprobs.batch_first = True
return lprobs
class DummyEncoder(FairseqEncoder):
def __init__(self):
super().__init__(None)
def forward(self, src_tokens, src_lengths):
mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
return {"encoder_out": src_tokens, "encoder_padding_mask": mask}
class CrossEntropyCriterionTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
if cls is CrossEntropyCriterionTestBase:
raise unittest.SkipTest("Skipping base class test case")
super().setUpClass()
def setUpArgs(self):
args = argparse.Namespace()
args.sentence_avg = False
args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion
return args
def setUp(self):
args = self.setUpArgs()
self.model = DummyEncoderModel(encoder=DummyEncoder())
self.criterion = self.criterion_cls.build_criterion(args, task=DummyTask(args))
def get_src_tokens(self, correct_prediction, aggregate):
"""
correct_prediction: True if the net_output (src_tokens) should
predict the correct target
aggregate: True if the criterion expects net_output (src_tokens)
aggregated across time axis
"""
predicted_idx = 0 if correct_prediction else 1
if aggregate:
src_tokens = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
src_tokens[b][predicted_idx] = 1.0
else:
src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
for b in range(2):
for t in range(10):
src_tokens[b][t][predicted_idx] = 1.0
return src_tokens
def get_target(self, soft_target):
if soft_target:
target = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
target[b][0] = 1.0
else:
target = torch.zeros((2, 10), dtype=torch.long)
return target
def get_test_sample(self, correct, soft_target, aggregate):
src_tokens = self.get_src_tokens(correct, aggregate)
target = self.get_target(soft_target)
L = src_tokens.size(1)
return {
"net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
"target": target,
"ntokens": src_tokens.size(0) * src_tokens.size(1),
}
|
COCO-LM/fairseq/tests/speech_recognition/asr_test_base.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/speech_recognition/asr_test_base.py",
"repo_id": "COCO-LM",
"token_count": 8394
}
| 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
"A B C D",
"B C D",
"C D",
"D",
]
ref_ids1 = list(
map(
torch.IntTensor,
[
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
],
)
)
ref_ids2 = list(
map(
torch.IntTensor,
[
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
],
)
)
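        # Explanatory note (assuming fairseq's default Dictionary layout): indices
        # 0-3 are reserved for <s>, <pad>, </s>, <unk>, so the words A-D first get
        # ids 4-7 in insertion order, and every encoded line ends with eos (id 2).
        # finalize() re-sorts non-special symbols by descending count, so D (count 4)
        # through A (count 1) map to ids 4 through 7, which is why ref_ids2 reverses
        # the word ids of ref_ids1.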
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode="w") as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
def test_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999 #fairseq:overwrite\n"
"<s> 999 #fairseq:overwrite\n"
"</s> 999 #fairseq:overwrite\n"
", 999\n"
"▁de 999\n"
)
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index("<pad>"), 1)
self.assertEqual(d.index("foo"), 3)
self.assertEqual(d.index("<unk>"), 4)
self.assertEqual(d.index("<s>"), 5)
self.assertEqual(d.index("</s>"), 6)
self.assertEqual(d.index(","), 7)
self.assertEqual(d.index("▁de"), 8)
def test_no_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n"
)
d = Dictionary()
with self.assertRaisesRegex(RuntimeError, "Duplicate"):
d.add_from_file(dict_file)
def test_space(self):
# for example, character models treat space as a symbol
dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n")
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index(" "), 4)
self.assertEqual(d.index("a"), 5)
self.assertEqual(d.index("b"), 6)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_dictionary.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_dictionary.py",
"repo_id": "COCO-LM",
"token_count": 1903
}
| 205 |
import contextlib
import unittest
import tempfile
from io import StringIO
import numpy as np
from tests.test_binaries import train_language_model
from tests.utils import create_dummy_data, preprocess_lm_data
try:
from pyarrow import plasma
from fairseq.data.plasma_utils import PlasmaView, PlasmaStore
PYARROW_AVAILABLE = True
except ImportError:
PYARROW_AVAILABLE = False
dummy_path = 'dummy'
@unittest.skipUnless(PYARROW_AVAILABLE, "")
class TestPlasmaView(unittest.TestCase):
def setUp(self) -> None:
self.tmp_file = tempfile.NamedTemporaryFile() # noqa: P201
self.path = self.tmp_file.name
self.server = PlasmaStore.start(path=self.path)
self.client = plasma.connect(self.path, num_retries=10)
def tearDown(self) -> None:
self.client.disconnect()
self.tmp_file.close()
self.server.kill()
def test_two_servers_do_not_share_object_id_space(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
server_2_path = self.path
with tempfile.NamedTemporaryFile() as server_1_path:
server = PlasmaStore.start(path=server_1_path.name, nbytes=10000)
arr1 = PlasmaView(
data_server_1, dummy_path, 1, plasma_path=server_1_path.name
)
assert len(arr1.client.list()) == 1
assert (arr1.array == data_server_1).all()
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=server_2_path)
assert (arr2.array == data_server_2).all()
assert (arr1.array == data_server_1).all()
server.kill()
def test_hash_collision(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
arr1 = PlasmaView(data_server_1, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
assert len(arr2.client.list()) == 1
assert (arr2.array == data_server_1).all()
# New hash key based on tuples
arr3 = PlasmaView(
data_server_2, dummy_path, (1, 12312312312, None), plasma_path=self.path
)
assert (
len(arr2.client.list()) == 2
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr2.client.list()
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr3.client.list()
), "No new object was created by using a novel hash key"
del arr3, arr2, arr1
@staticmethod
def _assert_view_equal(pv1, pv2):
np.testing.assert_array_equal(pv1.array, pv2.array)
def test_putting_same_array_twice(self):
data = np.array([4, 4, 4])
arr1 = PlasmaView(data, dummy_path, 1, plasma_path=self.path)
assert len(self.client.list()) == 1
arr1b = PlasmaView(
data, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
arr1c = PlasmaView(
None, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
assert len(self.client.list()) == 1
self._assert_view_equal(arr1, arr1b)
self._assert_view_equal(arr1, arr1c)
PlasmaView(
data, dummy_path, 2, plasma_path=self.path
) # new object id, adds new entry
assert len(self.client.list()) == 2
new_client = plasma.connect(self.path)
assert len(new_client.list()) == 2 # new client can access same objects
assert isinstance(arr1.object_id, plasma.ObjectID)
del arr1b
del arr1c
def test_plasma_store_full_raises(self):
with tempfile.NamedTemporaryFile() as new_path:
server = PlasmaStore.start(path=new_path.name, nbytes=10000)
with self.assertRaises(plasma.PlasmaStoreFull):
                # 10000 float64 values (~80 KB) exceed the 10000-byte store
PlasmaView(
np.random.rand(10000, 1), dummy_path, 1, plasma_path=new_path.name
)
server.kill()
def test_object_id_overflow(self):
PlasmaView.get_object_id("", 2 ** 21)
def test_training_lm_plasma(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--use-plasma-view", "--plasma-path", self.path],
run_validation=True,
)
|
COCO-LM/fairseq/tests/test_plasma_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_plasma_utils.py",
"repo_id": "COCO-LM",
"token_count": 2240
}
| 206 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# The script is largely adapted from the huggingface transformers library
import os
import logging
from collections import Counter
import torch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Dictionary:
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.alias_mapper = {'<s>': '[CLS]', '<pad>': '[PAD]', '</s>':'[SEP]', '<unk>': '[UNK]', '<mask>': '[MASK]',
'[CLS]': '[CLS]', '[PAD]': '[PAD]', '[SEP]':'[SEP]', '[UNK]': '[UNK]', '[MASK]': '[MASK]'}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.alias_mapper:
sym = self.alias_mapper[sym]
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.alias_mapper:
word = self.alias_mapper[word]
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict, word):
"""Updates counts from new dictionary."""
if word in self.alias_mapper:
word = self.alias_mapper[word]
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
# with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
with open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line_idx, line in enumerate(lines[indices_start_line:]):
try:
splits = line.rstrip().rsplit(" ", 1)
line = splits[0]
field = splits[1] if len(splits) > 1 else str(len(lines) - line_idx)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
logger.info(
"Duplicate word found when loading Dictionary: '{}', index is {}.".format(word, self.indices[word])
)
else:
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
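# Illustrative usage sketch (not part of the original file). The four default special
# symbols occupy indices 0-3 and are stored under their BERT-style aliases via
# alias_mapper, so the first symbol added afterwards gets index 4 and unknown lookups
# fall back to unk_index:
#
#   d = Dictionary()
#   d.add_symbol("hello")    # -> 4
#   d.index("hello")         # -> 4
#   d.index("[CLS]")         # -> 0 (alias of <s>)
#   d.index("never-seen")    # -> 3 (unk_index)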
|
COCO-LM/huggingface/cocolm/tokenization_utils.py/0
|
{
"file_path": "COCO-LM/huggingface/cocolm/tokenization_utils.py",
"repo_id": "COCO-LM",
"token_count": 3407
}
| 207 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Xiaoyi Dong
# ------------------------------------------
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import models
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import Dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import load_checkpoint, create_model, resume_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from checkpoint_saver import CheckpointSaver
from labeled_memcached_dataset import McDataset
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
import warnings
warnings.filterwarnings('ignore')
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='CSWin Training and Evaluating')
# Dataset / Model parameters
parser.add_argument('--data', default='/mnt/blob/testset/ImageNet', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='CSWin_64_12211_tiny_224', type=str, metavar='MODEL',
                    help='Name of model to train (default: "CSWin_64_12211_tiny_224")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--eval_checkpoint', default='', type=str, metavar='PATH',
help='path to eval checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
help='number of label classes (default: 1000)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=224, metavar='N',
                    help='Input image size (default: 224)')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
                    help='weight decay (default: 0.05)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                    help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                    help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=20, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy, e.g. "v0" or "original" (default: "rand-m9-mstd0.5-inc1")')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.8,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.0)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                    help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=True,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.99992,
                    help='decay factor for model weights moving average (default: 0.99992)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=8, metavar='N',
                    help='how many training processes to use (default: 8)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--use-chk', action='store_true', default=False,
                    help='Enable gradient checkpointing to reduce memory usage')
has_apex = False
has_native_amp = False
try:  # Apex is optional; amp / ApexDDP / convert_syncbn_model are used in the Apex code paths below
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model
    has_apex = True
except ImportError:
    pass
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
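# Example launch commands (illustrative only; paths, GPU counts and hyperparameter
# values are placeholders, not settings prescribed by this repository):
#
#   # distributed training on 8 GPUs
#   python -m torch.distributed.launch --nproc_per_node=8 main.py \
#       --data /path/to/imagenet --model CSWin_64_12211_tiny_224 -b 256 --lr 2e-3
#
#   # evaluation of a trained checkpoint
#   python -m torch.distributed.launch --nproc_per_node=1 main.py \
#       --data /path/to/imagenet --model CSWin_64_12211_tiny_224 -b 64 \
#       --eval_checkpoint /path/to/checkpoint.pth.tar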
def main():
setup_default_logging()
args, args_text = _parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed and args.num_gpu > 1:
        _logger.warning(
            'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
args.num_gpu = 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.num_gpu = 1
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
if args.distributed:
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
_logger.info('Training with a single process on %d GPUs.' % args.num_gpu)
torch.manual_seed(args.seed + args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint,
img_size=args.img_size,
use_chk=args.use_chk)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Trainable params (M):', count_parameters(model) / 1e6)
if args.local_rank == 0:
_logger.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
use_amp = None
if args.amp:
# for backwards compat, `--amp` arg tries apex before native amp
if has_apex:
args.apex_amp = True
elif has_native_amp:
args.native_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
if args.num_gpu > 1:
if use_amp == 'apex':
_logger.warning(
'Apex AMP does not work well with nn.DataParallel, disabling. Use DDP or Torch AMP.')
use_amp = None
model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
assert not args.channels_last, "Channels last not supported with DP, use DDP."
else:
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
optimizer = create_optimizer(args, model)
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=args.resume)
if args.distributed:
if args.sync_bn:
assert not args.split_bn
try:
if has_apex and use_amp != 'native':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
except Exception as e:
_logger.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1')
if has_apex and use_amp != 'native':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank], find_unused_parameters=True) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
train_dir = os.path.join(args.data, 'train')
if not os.path.exists(train_dir):
_logger.error('Training folder does not exist at: {}'.format(train_dir))
exit(1)
dataset_train = McDataset(args.data, './dataset/ILSVRC2012_name_train.txt', 'train')
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
if args.aa == 'None':
args.aa = None
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader
)
eval_dir = os.path.join(args.data, 'val')
if not os.path.isdir(eval_dir):
eval_dir = os.path.join(args.data, 'validation')
if not os.path.isdir(eval_dir):
_logger.error('Validation folder does not exist at: {}'.format(eval_dir))
exit(1)
dataset_eval = McDataset(args.data, './dataset/ILSVRC2012_name_val.txt', 'val')
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
if args.eval_checkpoint: # evaluate the model
load_checkpoint(model, args.eval_checkpoint, args.model_ema)
val_metrics = validate(model, loader_eval, validate_loss_fn, args)
print(f"Top-1 accuracy of the model is: {val_metrics['top1']:.1f}%")
return
saver = None
output_dir = ''
if args.local_rank == 0:
output_base = args.output if args.output else './output'
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
args.model,
str(data_config['input_size'][-1])
])
output_dir = get_outdir(output_base, 'train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try: # train the model
for epoch in range(start_epoch, num_epochs):
if args.distributed:
loader_train.sampler.set_epoch(epoch)
train_metrics = train_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.ema, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters(), create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Time: {rate:>4.0f}/s ({rate_avg:>4.0f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.sum:.3f}'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
|
CSWin-Transformer/main.py/0
|
{
"file_path": "CSWin-Transformer/main.py",
"repo_id": "CSWin-Transformer",
"token_count": 16147
}
| 208 |
{% extends "main.html" %}
<!-- Render hero under tabs -->
{% block tabs %}
{{ super() }}
<!-- github button -->
<script async defer src="https://buttons.github.io/buttons.js"></script>
<style>
.md-footer-copyright {
display: none
}
.md-footer-nav__inner {
display: none
}
.md-content {
display: none
}
.tx-container {
height: fit-content;
padding-top: 0rem;
background: linear-gradient(var(--md-primary-fg-color), var(--md-primary-fg-color--dark) 80%, #fff 90%);
}
.tx-hero {
color: var(--md-primary-bg-color);
justify-content: center;
margin: 32px 2.5rem;
}
.tx-hero h1 {
margin-bottom: 0rem;
color: currentColor;
font-weight: 700
}
.tx-hero__content {
padding-bottom: 6rem;
margin: 0 auto;
/* justify-content: left;
padding-right: 3rem; */
}
.tx-hero__content h1 {
font-size: 2.5rem;
}
.tx-hero__content_small {
justify-content: left;
padding-right: 3rem;
}
.tx-hero__image {
max-width: 100%;
max-height: 100%;
order: 1;
padding-right: 1.5rem;
padding-bottom: 4rem;
}
.tx-hero .md-button {
margin-top: .5rem;
margin-right: .5rem;
color: var(--md-primary-bg-color)
}
.tx-container-2 {
padding: 0rem;
background-color: white;
margin-bottom: 0px;
}
.tx-hero__image-2 {
max-width: 100%;
max-height: 100%;
order: 1;
padding-right: 0.1rem;
padding-left: 0.25rem;
padding-top: 10px;
padding-bottom: 10px;
float: left;
}
.tx-hero__content-2 {
margin-left: 50px;
justify-content: left;
/* color: #009485; */
font-weight: 300;
padding: 0 0px;
padding-bottom: 40px;
word-break: break-word;
float: right;
}
.tx-hero__content-2 h1 {
margin-top: 10px;
color: black;
/* color: #009485; */
font-weight: 600;
/* font-size: 36px; */
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
line-height: normal;
/*font-family: inherit;*/
}
.tx-hero__content-2 p {
font-size: 17px;
line-height: 1.8em;
text-rendering: optimizeLegibility;
color: black;
display: block;
}
.tx-container-3 {
height: auto;
}
@media screen and (min-width: 60em) {
.md-sidebar--secondary {
display: none
}
.tx-hero {
display: flex;
align-items: center;
justify-content: center;
}
.tx-hero__content {
max-width: 28rem;
margin-top: 3.5rem;
margin-bottom: 3.5rem;
margin-left: 1.0rem;
margin-right: 4.0rem;
align-items: center;
}
}
@media screen and (min-width: 76.25em) {
.md-sidebar--primary {
display: none
}
.top-hr {
width: 100%;
display: flex;
max-width: 61rem;
margin-right: auto;
margin-left: auto;
padding: 0 .2rem;
}
.bottom-hr {
margin-top: 10px;
width: 100%;
display: flex;
max-width: 61rem;
margin-right: auto;
margin-left: auto;
padding: 0 .2rem;
}
}
</style>
<section class="tx-container">
<div class="md-grid md-typeset">
<div class="tx-hero">
<div class="tx-hero__image">
<img
src="assets/images/climax-logo.png"
alt=""
width="320"
draggable="false"
>
</div>
<div class="tx-hero__content">
<h1 id="climax">ClimaX</h1>
<p>A foundation model for weather and climate.</p>
<br>
<a href="https://arxiv.org/abs/2301.10343" class="md-button md-button--primary">Paper</a>
<a
href="{{ page.next_page.url | url }}"
title="{{ page.next_page.title | e }}"
class="md-button md-button--primary"
>
Get started
</a><br>
<a href="https://github.com/microsoft/ClimaX" class="md-button">Contribute on GitHub
<img
class = icon
src="assets/images/icons/github-white.svg"
alt=""
width="24"
draggable="false"
>
</a>
</div>
</div>
</div>
</section>
<section class="tx-container-2">
<div class ="md-grid md-typeset">
<div class="tx-hero__image-2">
<img
src="assets/images/climax-coverfigure.png"
alt=""
width="600"
draggable="false"
>
</div>
<div class="tx-hero">
<div class="tx-hero__content-2">
<h1 id="what-is-climax">What is ClimaX?</h1><br>
<p>
▶️
<strong>
ClimaX is the first foundation model for weather and climate science.
</strong><br>
▶️
<strong>
Simple, flexible, and easy to use.
</strong><br>
▶️
<strong>
Ample examples showing how to apply the workflow to various downstream tasks, ranging from weather forecasting to climate downscaling.
</strong><br>
▶️
<strong>
Supports efficient scalable distributed training, powered by <a href="https://www.pytorchlightning.ai/">PyTorch Lightning</a>.
</strong><br>
</p>
</div>
<br>
</div>
</div>
</section>
<section class="tx-container-3">
<div class ="md-grid md-typeset">
<div class="tx-hero__content-2">
<h1 id="citation">Citation</h1>
<div class="highlight"><pre><span></span><code><a id="__codelineno-0-1" name="__codelineno-0-1" href="#__codelineno-0-1"></a><span class="nc">@article</span><span class="p">{</span><span class="nl">nguyen2023climax</span><span class="p">,</span>
<a id="__codelineno-0-2" name="__codelineno-0-2" href="#__codelineno-0-2"></a><span class="w"> </span><span class="na">title</span><span class="p">=</span><span class="s">{ClimaX: A foundation model for weather and climate}</span><span class="p">,</span>
<a id="__codelineno-0-3" name="__codelineno-0-3" href="#__codelineno-0-3"></a><span class="w"> </span><span class="na">author</span><span class="p">=</span><span class="s">{Nguyen, Tung and Brandstetter, Johannes and Kapoor, Ashish and Gupta, Jayesh K and Grover, Aditya}</span><span class="p">,</span>
<a id="__codelineno-0-4" name="__codelineno-0-4" href="#__codelineno-0-4"></a><span class="w"> </span><span class="na">journal</span><span class="p">=</span><span class="s">{arXiv preprint arXiv:2301.10343}</span><span class="p">,</span>
<a id="__codelineno-0-5" name="__codelineno-0-5" href="#__codelineno-0-5"></a><span class="w"> </span><span class="na">year</span><span class="p">=</span><span class="s">{2023}</span>
<a id="__codelineno-0-6" name="__codelineno-0-6" href="#__codelineno-0-6"></a><span class="p">}</span>
</code></pre></div>
<p>Also consider starring <a href="https://github.com/microsoft/ClimaX">the github repo</a>.
<a class="github-button" href="https://github.com/microsoft/ClimaX" data-icon="octicon-star" data-show-count="true" aria-label="Star microsoft/ClimaX on GitHub">Star</a> </p>
</div>
</div>
</section>
{% endblock %}
<!-- Footer Section -->
{% block footer %}{% endblock %}
|
ClimaX/docs/overrides/home.html/0
|
{
"file_path": "ClimaX/docs/overrides/home.html",
"repo_id": "ClimaX",
"token_count": 4406
}
| 209 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP
name: 10m_v_component_of_wind
cmip_name: vas
era_name: v10
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190710
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_10m_v_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_10m_v_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 127
}
| 210 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor, opt=None):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
self.opt = opt
if gan_mode == 'ls':
pass
elif gan_mode == 'original':
pass
elif gan_mode == 'w':
pass
elif gan_mode == 'hinge':
pass
else:
raise ValueError('Unexpected gan_mode {}'.format(gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input)
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input)
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input).type_as(input)
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'original': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'ls':
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
if isinstance(input, list):
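# multi-scale discriminators return a list with one entry per scale; each entry may itself be a
# list of intermediate features whose last element is the final prediction, hence pred_i[-1]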
loss = 0
for pred_i in input:
if isinstance(pred_i, list):
pred_i = pred_i[-1]
loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
|
CoCosNet-v2/models/networks/loss.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/loss.py",
"repo_id": "CoCosNet-v2",
"token_count": 1815
}
| 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import re
import argparse
from argparse import Namespace
import torch
import numpy as np
import importlib
from PIL import Image
def feature_normalize(feature_in, eps=1e-10):
feature_in_norm = torch.norm(feature_in, 2, 1, keepdim=True) + eps
feature_in_norm = torch.div(feature_in, feature_in_norm)
return feature_in_norm
def weighted_l1_loss(input, target, weights):
out = torch.abs(input - target)
out = out * weights.expand_as(out)
loss = out.mean()
return loss
def mse_loss(input, target=0):
return torch.mean((input - target)**2)
def vgg_preprocess(tensor, vgg_normal_correct=False):
if vgg_normal_correct:
tensor = (tensor + 1) / 2
# input is RGB tensor which ranges in [0,1]
# output is BGR tensor which ranges in [0,255]
tensor_bgr = torch.cat((tensor[:, 2:3, :, :], tensor[:, 1:2, :, :], tensor[:, 0:1, :, :]), dim=1)
# tensor_bgr = tensor[:, [2, 1, 0], ...]
tensor_bgr_ml = tensor_bgr - torch.Tensor([0.40760392, 0.45795686, 0.48501961]).type_as(tensor_bgr).view(1, 3, 1, 1)
tensor_rst = tensor_bgr_ml * 255
return tensor_rst
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importlib.import_module(module)
cls = None
for name, clsobj in clslib.__dict__.items():
if name.lower() == target_cls_name:
cls = clsobj
if cls is None:
print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name))
exit(0)
return cls
def save_network(net, label, epoch, opt):
save_filename = '%s_net_%s.pth' % (epoch, label)
save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)
torch.save(net.cpu().state_dict(), save_path)
if len(opt.gpu_ids) and torch.cuda.is_available():
net.cuda()
def load_network(net, label, epoch, opt):
save_filename = '%s_net_%s.pth' % (epoch, label)
save_dir = os.path.join(opt.checkpoints_dir, opt.name)
save_path = os.path.join(save_dir, save_filename)
if not os.path.exists(save_path):
print('not find model :' + save_path + ', do not load model!')
return net
weights = torch.load(save_path)
try:
net.load_state_dict(weights)
except KeyError:
print('key error, not load!')
except RuntimeError as err:
print(err)
net.load_state_dict(weights, strict=False)
print('loaded with strict = False')
print('Load from ' + save_path)
return net
def print_current_errors(opt, epoch, i, num, errors, t):
message = '(epoch: %d, iters: %d, finish: %.2f%%, time: %.3f) ' % (epoch, i, (i/num)*100.0, t)
for k, v in errors.items():
v = v.mean().float()
message += '%s: %.3f ' % (k, v)
print(message)
log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(log_name, "a") as log_file:
log_file.write('%s\n' % message)
|
CoCosNet-v2/util/util.py/0
|
{
"file_path": "CoCosNet-v2/util/util.py",
"repo_id": "CoCosNet-v2",
"token_count": 1436
}
| 212 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
# Given the option --dataset [datasetname],
# the file "datasets/datasetname_dataset.py"
# will be imported.
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
# In the file, the class called DatasetNameDataset() will
# be instantiated. It has to be a subclass of BaseDataset,
# and it is case-insensitive.
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise ValueError("In %s.py, there should be a subclass of BaseDataset "
"with class name that matches %s in lowercase." %
(dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataloader(opt):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
instance.initialize(opt)
print("dataset [%s] of size %d was created" %
(type(instance).__name__, len(instance)))
dataloader = torch.utils.data.DataLoader(
instance,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
drop_last=opt.isTrain
)
return dataloader
|
CoCosNet/data/__init__.py/0
|
{
"file_path": "CoCosNet/data/__init__.py",
"repo_id": "CoCosNet",
"token_count": 777
}
| 213 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
from collections import OrderedDict, namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from util.util import feature_normalize, mse_loss
import matplotlib.pyplot as plt
import torchvision
import numpy as np
postpa = torchvision.transforms.Compose([
torchvision.transforms.Lambda(lambda x: x.mul_(1. / 255)),
torchvision.transforms.Normalize(
mean=[-0.40760392, -0.45795686, -0.48501961], #add imagenet mean
std=[1, 1, 1]),
torchvision.transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]), #turn to RGB
])
postpb = torchvision.transforms.Compose([torchvision.transforms.ToPILImage()])
def post_processing(tensor):
t = postpa(tensor) # denormalize the image since the optimized tensor is the normalized one
t[t > 1] = 1
t[t < 0] = 0
img = postpb(t)
img = np.array(img)
return img
class ContextualLoss(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self):
super(ContextualLoss, self).__init__()
return None
def forward(self, X_features, Y_features, h=0.1, feature_centering=True):
'''
X_features & Y_features are feature vectors or feature 2d arrays
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# center the feature vector???
# to normalized feature vectors
if feature_centering:
X_features = X_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
Y_features = Y_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
X_features = feature_normalize(X_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
Y_features = feature_normalize(Y_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
# cosine distance = 1 - similarity
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
d = 1 - torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
# normalized distance: dij_bar
d_norm = d / (torch.min(d, dim=-1, keepdim=True)[0] + 1e-5) # batch_size * feature_size^2 * feature_size^2
# pairwise affinity
w = torch.exp((1 - d_norm) / h)
A_ij = w / torch.sum(w, dim=-1, keepdim=True)
# contextual loss per sample
CX = torch.mean(torch.max(A_ij, dim=1)[0], dim=-1)
loss = -torch.log(CX)
# contextual loss per batch
# loss = torch.mean(loss)
return loss
class ContextualLoss_forward(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self, opt):
super(ContextualLoss_forward, self).__init__()
self.opt = opt
return None
def forward(self, X_features, Y_features, h=0.1, feature_centering=True):
'''
X_features & Y_features are feature vectors or feature 2d arrays
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# to normalized feature vectors
if feature_centering:
if self.opt.PONO:
X_features = X_features - Y_features.mean(dim=1).unsqueeze(dim=1)
Y_features = Y_features - Y_features.mean(dim=1).unsqueeze(dim=1)
else:
X_features = X_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
Y_features = Y_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
X_features = feature_normalize(X_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size * feature_size
Y_features = feature_normalize(Y_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size * feature_size
# X_features = F.unfold(
# X_features, kernel_size=self.opt.match_kernel, stride=1, padding=int(self.opt.match_kernel // 2)) # batch_size * feature_depth_new * feature_size^2
# Y_features = F.unfold(
# Y_features, kernel_size=self.opt.match_kernel, stride=1, padding=int(self.opt.match_kernel // 2)) # batch_size * feature_depth_new * feature_size^2
# cosine distance = 1 - similarity
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
d = 1 - torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
# normalized distance: dij_bar
# d_norm = d
d_norm = d / (torch.min(d, dim=-1, keepdim=True)[0] + 1e-3) # batch_size * feature_size^2 * feature_size^2
# pairwise affinity
w = torch.exp((1 - d_norm) / h)
A_ij = w / torch.sum(w, dim=-1, keepdim=True)
# contextual loss per sample
CX = torch.mean(torch.max(A_ij, dim=-1)[0], dim=1)
loss = -torch.log(CX)
# contextual loss per batch
# loss = torch.mean(loss)
return loss
class ContextualLoss_complex(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self):
super(ContextualLoss_complex, self).__init__()
return None
def forward(self, X_features, Y_features, h=0.1, patch_size=1, direction='forward'):
'''
X_features & Y_features are feature vectors or feature 2d arrays
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# to normalized feature vectors
# TODO: center by the mean of Y_features
X_features = X_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
Y_features = Y_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
X_features = feature_normalize(X_features) # batch_size * feature_depth * feature_size^2
Y_features = feature_normalize(Y_features) # batch_size * feature_depth * feature_size^2
# to normalized feature vectors
X_features = F.unfold(
X_features, kernel_size=(patch_size, patch_size), stride=(1, 1), padding=(patch_size // 2,
patch_size // 2)) # batch_size * feature_depth_new * feature_size^2
Y_features = F.unfold(
Y_features, kernel_size=(patch_size, patch_size), stride=(1, 1), padding=(patch_size // 2,
patch_size // 2)) # batch_size * feature_depth_new * feature_size^2
# cosine distance = 1 - similarity
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
d = 1 - torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
# normalized distance: dij_bar
d_norm = d / (torch.min(d, dim=-1, keepdim=True)[0] + 1e-5) # batch_size * feature_size^2 * feature_size^2
# pairwise affinity
w = torch.exp((1 - d_norm) / h)
A_ij = w / torch.sum(w, dim=-1, keepdim=True)
# contextual loss per sample
if direction == 'forward':
CX = torch.mean(torch.max(A_ij, dim=-1)[0], dim=1)
else:
CX = torch.mean(torch.max(A_ij, dim=1)[0], dim=-1)
loss = -torch.log(CX)
return loss
class ChamferDistance_patch_loss(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self):
super(ChamferDistance_patch_loss, self).__init__()
return None
def forward(self, X_features, Y_features, patch_size=3, image_x=None, image_y=None, h=0.1, Y_features_in=None):
'''
X_features & Y_features are feature vectors or feature 2d arrays
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# to normalized feature vectors
X_features = F.unfold(
X_features, kernel_size=(patch_size, patch_size), stride=(1, 1), padding=(patch_size // 2,
patch_size // 2)) # batch_size, feature_depth_new * feature_size^2
Y_features = F.unfold(
Y_features, kernel_size=(patch_size, patch_size), stride=(1, 1), padding=(patch_size // 2,
patch_size // 2)) # batch_size, feature_depth_new * feature_size^2
if image_x is not None and image_y is not None:
image_x = torch.nn.functional.interpolate(image_x, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
image_y = torch.nn.functional.interpolate(image_y, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
similarity_matrix = torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
NN_index = similarity_matrix.max(dim=-1, keepdim=True)[1].squeeze()
if Y_features_in is not None:
loss = torch.mean((X_features - Y_features_in.detach())**2)
Y_features_in = Y_features_in.detach()
else:
loss = torch.mean((X_features - Y_features[:, :, NN_index].detach())**2)
Y_features_in = Y_features[:, :, NN_index].detach()
# re-arrange image
if image_x is not None and image_y is not None:
image_y_rearrange = image_y[:, :, NN_index]
image_y_rearrange = image_y_rearrange.view(batch_size, 3, feature_size, feature_size)
image_x = image_x.view(batch_size, 3, feature_size, feature_size)
image_y = image_y.view(batch_size, 3, feature_size, feature_size)
# plt.figure()
# plt.imshow((post_processing(image_x[0].detach().cpu())))
# plt.title('image x')
# plt.figure()
# plt.imshow((image_y[0]).permute(1, 2, 0).cpu().numpy())
# plt.title('image y')
# plt.figure()
# plt.imshow((image_y_rearrange[0]).permute(1, 2, 0).cpu().numpy())
# plt.title('corresponded image y')
# plt.show()
return loss
class ChamferDistance_loss(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self):
super(ChamferDistance_loss, self).__init__()
return None
def forward(self, X_features, Y_features, image_x, image_y, h=0.1, Y_features_in=None):
'''
X_features & Y_features are feature vectors or feature 2d arrays
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# to normalized feature vectors
X_features = feature_normalize(X_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
Y_features = feature_normalize(Y_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
image_x = torch.nn.functional.interpolate(image_x, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
image_y = torch.nn.functional.interpolate(image_y, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
similarity_matrix = torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
NN_index = similarity_matrix.max(dim=-1, keepdim=True)[1].squeeze()
if Y_features_in is not None:
loss = torch.mean((X_features - Y_features_in.detach())**2)
Y_features_in = Y_features_in.detach()
else:
loss = torch.mean((X_features - Y_features[:, :, NN_index].detach())**2)
Y_features_in = Y_features[:, :, NN_index].detach()
# re-arrange image
image_y_rearrange = image_y[:, :, NN_index]
image_y_rearrange = image_y_rearrange.view(batch_size, 3, feature_size, feature_size)
image_x = image_x.view(batch_size, 3, feature_size, feature_size)
image_y = image_y.view(batch_size, 3, feature_size, feature_size)
# plt.figure()
# plt.imshow((post_processing(image_x[0].detach().cpu())))
# plt.title('image x')
# plt.figure()
# plt.imshow((image_y[0]).permute(1, 2, 0).cpu().numpy())
# plt.title('image y')
# plt.figure()
# plt.imshow((image_y_rearrange[0]).permute(1, 2, 0).cpu().numpy())
# plt.title('corresponded image y')
# plt.show()
return loss, Y_features_in, X_features
# class ChamferDistance_loss(nn.Module):
# '''
# input is Al, Bl, channel = 1, range ~ [0, 255]
# '''
# def __init__(self):
# super(ChamferDistance_loss, self).__init__()
# return None
# def forward(self, X_features, Y_features, image_x, image_y):
# '''
# X_features&Y_features are are feature vectors or feature 2d array
# h: bandwidth
# return the per-sample loss
# '''
# batch_size = X_features.shape[0]
# feature_depth = X_features.shape[1]
# feature_size = X_features.shape[2]
# # to normalized feature vectors
# X_features = feature_normalize(X_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
# Y_features = feature_normalize(Y_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size^2
# image_x = torch.nn.functional.interpolate(image_x, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
# image_y = torch.nn.functional.interpolate(image_y, size=(feature_size, feature_size), mode='bilinear').view(batch_size, 3, -1)
# X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
# similarity_matrix = torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
# NN_index = similarity_matrix.max(dim=-1, keepdim=True)[1].squeeze()
# loss = torch.mean((X_features - Y_features[:, :, NN_index].detach())**2)
# # re-arrange image
# image_y_rearrange = image_y[:, :, NN_index]
# image_y_rearrange = image_y_rearrange.view(batch_size, 3, feature_size, feature_size)
# image_x = image_x.view(batch_size, 3, feature_size, feature_size)
# image_y = image_y.view(batch_size, 3, feature_size, feature_size)
# # plt.figure()
# # plt.imshow((post_processing(image_x[0].detach().cpu())))
# # plt.title('image x')
# # plt.figure()
# # plt.imshow((image_y[0]).permute(1, 2, 0).cpu().numpy())
# # plt.title('image y')
# # plt.figure()
# # plt.imshow((image_y_rearrange[0]).permute(1, 2, 0).cpu().numpy())
# # plt.title('corresponded image y')
# # plt.show()
# return loss
if __name__ == "__main__":
contextual_loss = ContextualLoss()
batch_size = 32
feature_depth = 8
feature_size = 16
X_features = torch.zeros(batch_size, feature_depth, feature_size, feature_size)
Y_features = torch.zeros(batch_size, feature_depth, feature_size, feature_size)
cx_loss = contextual_loss(X_features, Y_features, 1)
print(cx_loss)
|
CoCosNet/models/networks/ContextualLoss.py/0
|
{
"file_path": "CoCosNet/models/networks/ContextualLoss.py",
"repo_id": "CoCosNet",
"token_count": 7439
}
| 214 |
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import gzip
import os
import json
import numpy as np
from more_itertools import chunked
DATA_DIR='../data/codesearch'
def format_str(string):
for char in ['\r\n', '\r', '\n']:
string = string.replace(char, ' ')
return string
def preprocess_test_data(language, test_batch_size=1000):
path = os.path.join(DATA_DIR, '{}_test_0.jsonl.gz'.format(language))
print(path)
with gzip.open(path, 'r') as pf:
data = pf.readlines()
idxs = np.arange(len(data))
data = np.array(data, dtype=object)  # np.object was removed in recent NumPy; the plain object dtype is equivalent
np.random.seed(0) # set random seed so that random things are reproducible
np.random.shuffle(idxs)
data = data[idxs]
batched_data = chunked(data, test_batch_size)
print("start processing")
for batch_idx, batch_data in enumerate(batched_data):
if len(batch_data) < test_batch_size:
break # the last batch is smaller than the others, exclude.
examples = []
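# pair every docstring in the batch with every code snippet in the batch
# (test_batch_size x test_batch_size candidate pairs); the correct code for a
# docstring is the one that shares the same url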
for d_idx, d in enumerate(batch_data):
line_a = json.loads(str(d, encoding='utf-8'))
doc_token = ' '.join(line_a['docstring_tokens'])
for dd in batch_data:
line_b = json.loads(str(dd, encoding='utf-8'))
code_token = ' '.join([format_str(token) for token in line_b['code_tokens']])
example = (str(1), line_a['url'], line_b['url'], doc_token, code_token)
example = '<CODESPLIT>'.join(example)
examples.append(example)
data_path = os.path.join(DATA_DIR, 'test/{}'.format(language))
if not os.path.exists(data_path):
os.makedirs(data_path)
file_path = os.path.join(data_path, 'batch_{}.txt'.format(batch_idx))
print(file_path)
with open(file_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(examples))
if __name__ == '__main__':
languages = ['go', 'php', 'python', 'java', 'javascript', 'ruby']
for lang in languages:
preprocess_test_data(lang)
|
CodeBERT/CodeBERT/codesearch/process_data.py/0
|
{
"file_path": "CodeBERT/CodeBERT/codesearch/process_data.py",
"repo_id": "CodeBERT",
"token_count": 941
}
| 215 |
# CodeReviewer
This repo provides the code for reproducing the experiments in [CodeReviewer: Pre-Training for Automating Code Review Activities](https://arxiv.org/abs/2203.09095). **CodeReviewer** is a model pre-trained with code change and code review data to support code review tasks.
The pre-trained checkpoint of CodeReivewer is available in [Huggingface](https://huggingface.co/microsoft/codereviewer).
Our dataset is available in [Zenodo](https://zenodo.org/record/6900648).
## 1. Dependency
- conda install nltk
- conda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch
- conda install transformers
## 2. Brief Introduction
CodeReviewer supports three related tasks: **Quality Estimation** (`cls` for short), **Comment Generation** (`msg` for short) and **Code Refinement** (`ref` for short).
Demo data:
``` python
{
"old_file": "import torch", # f1
"diff_hunk": "@@ -1 +1,2 @@\n import torch\n +import torch.nn as nn", # f1->f2
"comment": "I don't think we need to import torch.nn here.", # requirements for f2->f3
"target": "import torch" # f3
}
```
* Quality Estimation: given "old_file" and "diff_hunk", predict whether the code change is problematic and needs a review comment.
* Comment Generation: given "old_file" and "diff_hunk", generate a review comment for the change. An expected comment is shown as "comment" above.
* Code Refinement: given "old_file", "diff_hunk", and "comment", revise the code according to the review comment. For the example above, since the comment indicates we don't need *import torch.nn*, we simply delete that line.
The model inputs are the code change (old file and diff hunk) and the review comment (optional, depending on the task). Input data is preprocessed in `utils.py: ReviewExample` and wrapped into {`utils.py: CommentClsDataset, SimpleGenDataset, RefineDataset`}.
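For a quick sanity check of the released checkpoint outside the training scripts, the sketch below runs comment generation directly through Huggingface. Note that passing a plain diff string to the tokenizer is a simplifying assumption for illustration only; the canonical preprocessing (including the special diff tokens) is what `ReviewExample` and `SimpleGenDataset` implement.

```python
# Hedged sketch: generate a review comment for a diff hunk with the public checkpoint.
# The input formatting below is simplified; see utils.py for the exact preprocessing.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/codereviewer")
model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/codereviewer")

diff_hunk = "@@ -1 +1,2 @@\n import torch\n+import torch.nn as nn"
inputs = tokenizer(diff_hunk, return_tensors="pt", truncation=True, max_length=512)
outputs = model.generate(**inputs, max_length=128, num_beams=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```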
## 3. Finetune/Inference
Before you start to run experiments with CodeReviewer, please download the [datasets](https://zenodo.org/record/6900648) first.
```bash
# prepare model checkpoint and datasets
cd code/sh
# adjust the arguments in the *sh* scripts
bash finetune-cls.sh
```
A demo bash script (finetune-cls.sh) is shown:
```bash
mnt_dir="/home/codereview"
# You may change the following block for multiple gpu training
MASTER_HOST=localhost && echo MASTER_HOST: ${MASTER_HOST}
MASTER_PORT=23333 && echo MASTER_PORT: ${MASTER_PORT}
RANK=0 && echo RANK: ${RANK}
PER_NODE_GPU=1 && echo PER_NODE_GPU: ${PER_NODE_GPU}
WORLD_SIZE=1 && echo WORLD_SIZE: ${WORLD_SIZE}
NODES=1 && echo NODES: ${NODES}
NCCL_DEBUG=INFO
bash test_nltk.sh
# Change the arguments as required:
# model_name_or_path, load_model_path: the path of the model to be finetuned
# eval_file: the path of the evaluation data
# output_dir: the directory to save finetuned model (not used at infer/test time)
# out_file: the path of the output file
# train_file_name: can be a directory containing files named with "train*.jsonl"
python -m torch.distributed.launch --nproc_per_node ${PER_NODE_GPU} --node_rank=${RANK} --nnodes=${NODES} --master_addr=${MASTER_HOST} --master_port=${MASTER_PORT} ../run_finetune_cls.py \
--train_epochs 30 \
--model_name_or_path microsoft/codereviewer \
--output_dir ../../save/cls \
--train_filename ../../dataset/Diff_Quality_Estimation \
--dev_filename ../../dataset/Diff_Quality_Estimation/cls-valid.jsonl \
--max_source_length 512 \
--max_target_length 128 \
--train_batch_size 12 \
--learning_rate 3e-4 \
--gradient_accumulation_steps 3 \
--mask_rate 0.15 \
--save_steps 3600 \
--log_steps 100 \
--train_steps 120000 \
--gpu_per_node=${PER_NODE_GPU} \
--node_index=${RANK} \
--seed 2233
```
## 4. File structure
```
.
├── bleu.py # demo code for BLEU evaluation
├── configs.py
├── evaluator # copied from CodeXGlue for BLEU evaluation
├── models.py # CodeReviewer model
├── run_finetune_xxx.py # finetune script - xxx in {cls, msg, gen}
├── run_infer_msg.py # inference script for comment generation task
├── run_test_xxx.py # test script - xxx in {cls, msg, gen}
├── sh/xx.sh # bash script for running finetune and test scripts with arguments
│ ├── finetune-xxx.sh
│ ├── infer-json.sh
│ ├── test-xxx.sh
│ ├── test_nltk.sh
└── utils.py # utils for data preprocessing
```
# Reference
If you use this code or CodeReviewer, please consider citing us.
<pre><code>@article{li2022codereviewer,
title={CodeReviewer: Pre-Training for Automating Code Review Activities},
author={Li, Zhiyu and Lu, Shuai and Guo, Daya and Duan, Nan and Jannu, Shailesh and Jenks, Grant and Majumder, Deep and Green, Jared and Svyatkovskiy, Alexey and Fu, Shengyu and others},
journal={arXiv preprint arXiv:2203.09095},
year={2022}
}</code></pre>
|
CodeBERT/CodeReviewer/README.md/0
|
{
"file_path": "CodeBERT/CodeReviewer/README.md",
"repo_id": "CodeBERT",
"token_count": 1736
}
| 216 |
# Code Pretraining Models
This repo contains code pretraining models in the CodeBERT series from Microsoft, including six models as of June 2023.
- CodeBERT (EMNLP 2020)
- GraphCodeBERT (ICLR 2021)
- UniXcoder (ACL 2022)
- CodeReviewer (ESEC/FSE 2022)
- CodeExecutor (ACL 2023)
- LongCoder (ICML 2023)
# CodeBERT
This repo provides the code for reproducing the experiments in [CodeBERT: A Pre-Trained Model for Programming and Natural Languages](https://arxiv.org/pdf/2002.08155.pdf). CodeBERT is a pre-trained model for programming language, which is a multi-programming-lingual model pre-trained on NL-PL pairs in 6 programming languages (Python, Java, JavaScript, PHP, Ruby, Go).
### Dependency
- pip install torch
- pip install transformers
### Quick Tour
We use the huggingface/transformers framework to train the model. You can use our model like the pre-trained RoBERTa base model. Here, we give an example of how to load the model.
```python
import torch
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
model = RobertaModel.from_pretrained("microsoft/codebert-base")
model.to(device)
```
### NL-PL Embeddings
Here, we give an example to obtain embedding from CodeBERT.
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/codebert-base")
>>> model = AutoModel.from_pretrained("microsoft/codebert-base")
>>> nl_tokens=tokenizer.tokenize("return maximum value")
['return', 'Ġmaximum', 'Ġvalue']
>>> code_tokens=tokenizer.tokenize("def max(a,b): if a>b: return a else return b")
['def', 'Ġmax', '(', 'a', ',', 'b', '):', 'Ġif', 'Ġa', '>', 'b', ':', 'Ġreturn', 'Ġa', 'Ġelse', 'Ġreturn', 'Ġb']
>>> tokens=[tokenizer.cls_token]+nl_tokens+[tokenizer.sep_token]+code_tokens+[tokenizer.eos_token]
['<s>', 'return', 'Ġmaximum', 'Ġvalue', '</s>', 'def', 'Ġmax', '(', 'a', ',', 'b', '):', 'Ġif', 'Ġa', '>', 'b', ':', 'Ġreturn', 'Ġa', 'Ġelse', 'Ġreturn', 'Ġb', '</s>']
>>> tokens_ids=tokenizer.convert_tokens_to_ids(tokens)
[0, 30921, 4532, 923, 2, 9232, 19220, 1640, 102, 6, 428, 3256, 114, 10, 15698, 428, 35, 671, 10, 1493, 671, 741, 2]
>>> context_embeddings=model(torch.tensor(tokens_ids)[None,:])[0]
torch.Size([1, 23, 768])
tensor([[-0.1423, 0.3766, 0.0443, ..., -0.2513, -0.3099, 0.3183],
[-0.5739, 0.1333, 0.2314, ..., -0.1240, -0.1219, 0.2033],
[-0.1579, 0.1335, 0.0291, ..., 0.2340, -0.8801, 0.6216],
...,
[-0.4042, 0.2284, 0.5241, ..., -0.2046, -0.2419, 0.7031],
[-0.3894, 0.4603, 0.4797, ..., -0.3335, -0.6049, 0.4730],
[-0.1433, 0.3785, 0.0450, ..., -0.2527, -0.3121, 0.3207]],
grad_fn=<SelectBackward>)
```
### Probing
As stated in the paper, CodeBERT is not suitable for the mask prediction task, while CodeBERT (MLM) is.
We give an example of how to use CodeBERT (MLM) for the mask prediction task.
```python
from transformers import RobertaConfig, RobertaTokenizer, RobertaForMaskedLM, pipeline
model = RobertaForMaskedLM.from_pretrained("microsoft/codebert-base-mlm")
tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base-mlm")
CODE = "if (x is not None) <mask> (x>1)"
fill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer)
outputs = fill_mask(CODE)
print(outputs)
```
Results
```python
'and', 'or', 'if', 'then', 'AND'
```
The detailed outputs are as follows:
```python
{'sequence': '<s> if (x is not None) and (x>1)</s>', 'score': 0.6049249172210693, 'token': 8}
{'sequence': '<s> if (x is not None) or (x>1)</s>', 'score': 0.30680200457572937, 'token': 50}
{'sequence': '<s> if (x is not None) if (x>1)</s>', 'score': 0.02133703976869583, 'token': 114}
{'sequence': '<s> if (x is not None) then (x>1)</s>', 'score': 0.018607674166560173, 'token': 172}
{'sequence': '<s> if (x is not None) AND (x>1)</s>', 'score': 0.007619690150022507, 'token': 4248}
```
### Downstream Tasks
For Code Search and Code Documentation Generation tasks, please refer to the [CodeBERT](https://github.com/microsoft/CodeBERT/tree/master/CodeBERT) folder.
# GraphCodeBERT
This repo also provides the code for reproducing the experiments in [GraphCodeBERT: Pre-training Code Representations with Data Flow](https://openreview.net/pdf?id=jLoC4ez43PZ). GraphCodeBERT is a pre-trained model for programming language that considers the inherent structure of code i.e. data flow, which is a multi-programming-lingual model pre-trained on NL-PL pairs in 6 programming languages (Python, Java, JavaScript, PHP, Ruby, Go).
For downstream tasks like code search, clone detection, code refinement and code translation, please refer to the [GraphCodeBERT](https://github.com/microsoft/CodeBERT/tree/master/GraphCodeBERT) folder.
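The released checkpoint can be loaded through Huggingface in the same way as CodeBERT; the minimal sketch below only produces contextual embeddings and is meant as an illustration. The data-flow inputs used in the paper (variable-level edges extracted with tree-sitter) are built by the preprocessing code in the GraphCodeBERT folder and are not reproduced here.

```python
# Hedged sketch: plain embedding extraction with GraphCodeBERT (no data-flow inputs).
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/graphcodebert-base")
model = AutoModel.from_pretrained("microsoft/graphcodebert-base")

code = "def max(a, b): return a if a > b else b"
inputs = tokenizer(code, return_tensors="pt")
embeddings = model(**inputs)[0]  # shape: (1, seq_len, 768)
print(embeddings.shape)
```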
# UniXcoder
This repo will provide the code for reproducing the experiments in [UniXcoder: Unified Cross-Modal Pre-training for Code Representation](https://arxiv.org/pdf/2203.03850.pdf). UniXcoder is a unified cross-modal pre-trained model for programming languages to support both code-related understanding and generation tasks.
Please refer to the [UniXcoder](https://github.com/microsoft/CodeBERT/tree/master/UniXcoder) folder for tutorials and downstream tasks.
# CodeReviewer
This repo also provides the code for reproducing the experiments in [CodeReviewer: Pre-Training for Automating Code Review Activities](https://arxiv.org/abs/2203.09095). CodeReviewer is a model pre-trained with code change and code review data to support code review tasks.
Please refer to the [CodeReviewer](https://github.com/microsoft/CodeBERT/tree/master/CodeReviewer) folder for tutorials and downstream tasks.
# CodeExecutor
This repo provides the code for reproducing the experiments in [Code Execution with Pre-trained Language Models](https://arxiv.org/pdf/2305.05383.pdf). CodeExecutor is a pre-trained model that learns to predict the execution traces using a code execution pre-training task and curriculum learning.
Please refer to the [CodeExecutor](https://github.com/microsoft/CodeBERT/tree/master/CodeExecutor) folder for details.
# LongCoder
This repo will provide the code for reproducing the experiments on LCC datasets in [LongCoder: A Long-Range Pre-trained Language Model for Code Completion](https://arxiv.org/abs/2306.14893). LongCoder is a sparse and efficient pre-trained Transformer model for long code modeling.
Please refer to the [LongCoder](https://github.com/microsoft/CodeBERT/tree/master/LongCoder) folder for details.
## Contact
Feel free to contact Daya Guo ([email protected]), Shuai Lu ([email protected]) and Nan Duan ([email protected]) if you have any further questions.
## Contributing
We appreciate all contributions and thank all the contributors!
<p align="center">
<img src="https://contributors-img.web.app/image?repo=microsoft/CodeBERT" />
</p>
|
CodeBERT/README.md/0
|
{
"file_path": "CodeBERT/README.md",
"repo_id": "CodeBERT",
"token_count": 2413
}
| 217 |