text (stringlengths 5-22M) | id (stringlengths 12-177) | metadata (dict) | __index_level_0__ (int64, 0-1.37k)
---|---|---|---|
data "azurerm_subscription" "current" {}
data "azurerm_client_config" "current" {}
# Random unique id
resource "random_string" "unique_id" {
length = 4
min_numeric = 4
}
data "azurerm_container_registry" "mgmt_acr" {
name = var.acr_name
resource_group_name = var.mgmt_resource_group_name
}
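# Resolve the caller's public IP (used when public_deployment_ip_address isn't supplied)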
data "http" "myip" {
count = var.public_deployment_ip_address == "" ? 1 : 0
url = "https://ipecho.net/plain"
}
data "azurerm_monitor_diagnostic_categories" "api" {
resource_id = azurerm_linux_web_app.api.id
depends_on = [
azurerm_linux_web_app.api,
azurerm_service_plan.core,
]
}
data "azurerm_monitor_diagnostic_categories" "sb" {
resource_id = azurerm_servicebus_namespace.sb.id
depends_on = [
azurerm_servicebus_namespace.sb
]
}
|
AzureTRE/core/terraform/data.tf/0
|
{
"file_path": "AzureTRE/core/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 332
}
| 107 |
output "core_vnet_id" {
value = azurerm_virtual_network.core.id
}
output "bastion_subnet_id" {
value = azurerm_subnet.bastion.id
}
output "azure_firewall_subnet_id" {
value = azurerm_subnet.azure_firewall.id
}
output "app_gw_subnet_id" {
value = azurerm_subnet.app_gw.id
}
output "web_app_subnet_id" {
value = azurerm_subnet.web_app.id
}
output "shared_subnet_id" {
value = azurerm_subnet.shared.id
}
output "airlock_processor_subnet_id" {
value = azurerm_subnet.airlock_processor.id
}
output "airlock_storage_subnet_id" {
value = azurerm_subnet.airlock_storage.id
}
output "airlock_events_subnet_id" {
value = azurerm_subnet.airlock_events.id
}
output "resource_processor_subnet_id" {
value = azurerm_subnet.resource_processor.id
}
# DNS Zones
output "azure_monitor_dns_zone_id" {
value = azurerm_private_dns_zone.azure_monitor.id
}
output "azure_monitor_oms_opinsights_dns_zone_id" {
value = azurerm_private_dns_zone.azure_monitor_oms_opinsights.id
}
output "azure_monitor_ods_opinsights_dns_zone_id" {
value = azurerm_private_dns_zone.azure_monitor_ods_opinsights.id
}
output "azure_monitor_agentsvc_dns_zone_id" {
value = azurerm_private_dns_zone.azure_monitor_agentsvc.id
}
output "blob_core_dns_zone_id" {
value = azurerm_private_dns_zone.blobcore.id
}
output "azurewebsites_dns_zone_id" {
value = azurerm_private_dns_zone.azurewebsites.id
}
output "static_web_dns_zone_id" {
value = azurerm_private_dns_zone.static_web.id
}
output "file_core_dns_zone_id" {
value = azurerm_private_dns_zone.filecore.id
}
output "queue_core_dns_zone_id" {
value = azurerm_private_dns_zone.private_dns_zones["privatelink.queue.core.windows.net"].id
}
output "table_core_dns_zone_id" {
value = azurerm_private_dns_zone.private_dns_zones["privatelink.table.core.windows.net"].id
}
|
AzureTRE/core/terraform/network/outputs.tf/0
|
{
"file_path": "AzureTRE/core/terraform/network/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 784
}
| 108 |
variable "tre_id" {
type = string
description = "Unique identifier for the TRE, such as projectx-dev-1234"
validation {
condition = length(var.tre_id) < 12 && lower(var.tre_id) == var.tre_id
error_message = "The tre_id value must be lowercase and < 12 chars."
}
}
variable "location" {
type = string
description = "Azure region for deployment of core TRE services"
}
variable "acr_name" {
type = string
description = "Management ACR name"
}
variable "core_address_space" {
type = string
description = "Core services VNET Address Space"
validation {
condition = parseint(element(split("/", var.core_address_space), 1), 10) > 0 && parseint(element(split("/", var.core_address_space), 1), 10) <= 22
error_message = "core_address_space size should be /22 or larger"
}
}
variable "tre_address_space" {
type = string
description = "Overall TRE Address Space pool, will be used for workspace VNETs, can be a supernet of address_space."
}
variable "api_image_repository" {
type = string
description = "Repository for API image"
default = "microsoft/azuretre/api"
}
variable "core_app_service_plan_sku" {
type = string
default = "P1v3"
}
variable "resource_processor_vmss_porter_image_repository" {
type = string
description = "Repository for resource processor vmms porter image"
default = "microsoft/azuretre/resource-processor-vm-porter"
}
variable "mgmt_storage_account_name" {
type = string
description = "Storage account created by bootstrap to hold all Terraform state"
}
variable "mgmt_resource_group_name" {
type = string
description = "Shared management resource group"
}
variable "terraform_state_container_name" {
type = string
description = "Name of the storage container for Terraform state"
}
variable "resource_processor_number_processes_per_instance" {
type = number
default = 5
description = "The number of CPU processes to run the RP on per VM instance"
}
variable "enable_swagger" {
type = bool
default = false
description = "Determines whether the Swagger interface for the API will be available."
sensitive = false
}
variable "swagger_ui_client_id" {
type = string
description = "The client id (app id) of the registration in Azure AD for the Swagger UI"
sensitive = true
}
variable "aad_tenant_id" {
type = string
description = "The tenant id of the Azure AD used for authentication."
sensitive = true
}
variable "api_client_id" {
type = string
description = "The client id (app id) of the registration in Azure AD for the API."
sensitive = true
}
variable "api_client_secret" {
type = string
description = "A client secret used by the API to authenticate with Azure AD for access to Microsoft Graph."
sensitive = true
}
variable "application_admin_client_id" {
type = string
description = "The client id (app id) of the registration in Azure AD for creating AAD Applications."
sensitive = true
}
variable "application_admin_client_secret" {
type = string
description = "A client secret used by the Resource Processor to authenticate with Azure AD to create AAD Applications."
sensitive = true
}
variable "resource_processor_type" {
default = "vmss_porter"
description = "Which resource processor to deploy."
type = string
}
variable "resource_processor_vmss_sku" {
type = string
default = "Standard_B2s"
description = "The SKU of the resource processor VMSS."
}
variable "arm_environment" {
type = string
default = "public"
description = "Used as an environment variable in the VMSS to set the Azure cloud for Terraform"
}
variable "arm_use_msi" {
type = bool
default = false
description = "Used as an environment variable to determine if Terraform should use a managed identity"
}
variable "stateful_resources_locked" {
type = bool
default = true
description = "Used to add locks on resources with state"
}
variable "ci_git_ref" {
default = ""
description = "The git ref used by the ci to deploy this TRE"
type = string
}
variable "enable_local_debugging" {
default = false
description = "This will allow Cosmos to be accessible from your local IP address and add some extra role permissions."
type = bool
}
# this var is optional and used to avoid assigning a role on every run.
variable "arm_subscription_id" {
description = "The subscription id to create the resource processor permission/role. If not supplied will use the TF context."
type = string
default = ""
}
variable "public_deployment_ip_address" {
description = "Your local IP address if https://ipecho.net/plain is blocked."
type = string
default = ""
}
variable "enable_airlock_malware_scanning" {
type = bool
default = false
description = "If False, Airlock requests will skip the malware scanning stage"
}
variable "rp_bundle_values" {
description = "Additional environment values to set on the resource processor that can be supplied to template bundles"
type = map(string)
default = {}
}
variable "is_cosmos_defined_throughput" {
type = bool
default = false
}
variable "logging_level" {
type = string
default = "INFO"
description = "The logging level for the API and Resource Processor"
validation {
condition = contains(["INFO", "DEBUG", "WARNING", "ERROR"], var.logging_level)
error_message = "logging_level must be one of ERROR, WARNING, INFO, DEBUG"
}
}
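# Illustrative minimum terraform.tfvars for the variables above (placeholder
# values only; client ids/secrets come from your AAD app registrations):
#   tre_id                         = "mytre-dev-3"
#   location                       = "westeurope"
#   acr_name                       = "acrmytredev"
#   core_address_space             = "10.1.0.0/22"
#   tre_address_space              = "10.0.0.0/12"
#   mgmt_storage_account_name      = "stmgmtmytre"
#   mgmt_resource_group_name       = "rg-mytre-mgmt"
#   terraform_state_container_name = "tfstate"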
|
AzureTRE/core/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/core/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 1885
}
| 109 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
if [ "$(yq eval ".custom.runtime_image.build" porter.yaml)" == "null" ]; then
echo "Runtime image build section isn't specified. Exiting..."
exit 0
fi
image_name=$(yq eval ".custom.runtime_image.name" porter.yaml)
version_file=$(yq eval ".custom.runtime_image.build.version_file" porter.yaml)
version_line=$(cat "${version_file}")
# doesn't work with quotes
# shellcheck disable=SC2206
version_array=( ${version_line//=/ } ) # split by =
version="${version_array[1]//\"}" # second element is what we want, remove " chars
docker push "${FULL_IMAGE_NAME_PREFIX}/${image_name}:${version}"
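# For reference, this script expects a porter.yaml section shaped roughly like
# the following (names here are illustrative, not taken from a real bundle):
#   custom:
#     runtime_image:
#       name: my-runtime-image
#       build:
#         version_file: version.txt
# where the version file contains a single line such as: __version__ = "0.1.2"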
|
AzureTRE/devops/scripts/bundle_runtime_image_push.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/bundle_runtime_image_push.sh",
"repo_id": "AzureTRE",
"token_count": 258
}
| 110 |
#!/bin/bash
# This script migrates the Cosmos database based on any breaking changes that have occurred.
# Cosmos is behind a private network, so we call the /migrations endpoint of the API
set -o errexit
set -o pipefail
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
function usage() {
cat <<USAGE
Usage: $0 [-u --tre_url] [-a --access-token] [-i --insecure]
Options:
-u, --tre_url URL for the TRE
-a, --access-token Azure access token to automatically post to the API
-i, --insecure Bypass SSL certificate checks
USAGE
exit 1
}
function get_http_code() {
curl_output="$1"
http_code=$(echo "${curl_output}" | grep HTTP | sed 's/.*HTTP\/1\.1 \([0-9]\+\).*/\1/' | tail -n 1)
}
curl_options=(--retry 3 --retry-max-time 300 --max-time 90)
# if no arguments are provided, return usage function
if [ $# -eq 0 ]; then
usage # run usage function
fi
while [ "$1" != "" ]; do
case $1 in
-u | --tre_url)
shift
tre_url=$1
;;
-a | --access-token)
shift
access_token=$1
;;
-i | --insecure)
curl_options+=("-k")
;;
*)
echo "Unexpected argument: '$1'"
usage
;;
esac
if [[ -z "$2" ]]; then
# if no more args then stop processing
break
fi
shift # remove the current value for `$1` and use the next
done
# done with processing args and can set this
set -o nounset
if [[ -z ${tre_url:-} ]]; then
echo -e "No TRE URI provided.\n"
usage
fi
if [ -z "${access_token:-}" ]; then
# If access token isn't set, try to obtain it
if [ -z "${ACCESS_TOKEN:-}" ]
then
echo "API access token isn't available - migrating state store not possible. "
exit 1
fi
access_token=${ACCESS_TOKEN}
fi
migrate_result=$(curl -i -X "POST" "${tre_url}/api/migrations" "${curl_options[@]}" \
-H "accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer ${access_token}")
get_http_code "${migrate_result}"
echo "${migrate_result}"
if [[ ${http_code} != 202 ]]; then
echo "Error while migrating state store"
exit 1
fi
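# Example invocation (values are illustrative):
#   ./migrate_state_store.sh --tre_url "https://mytre.westeurope.cloudapp.azure.com" --access-token "${TOKEN}"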
|
AzureTRE/devops/scripts/migrate_state_store.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/migrate_state_store.sh",
"repo_id": "AzureTRE",
"token_count": 918
}
| 111 |
terraform {
required_version = ">= 1.2.5"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "3.51.0"
}
}
}
|
AzureTRE/devops/terraform/terraform.tf/0
|
{
"file_path": "AzureTRE/devops/terraform/terraform.tf",
"repo_id": "AzureTRE",
"token_count": 79
}
| 112 |
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Operation id.",
"format": "uuid"
},
"operationType": {
"type": "string",
"description": "Operation type.",
"enum": [
"Create",
"Update",
"Delete"
]
},
"status": {
"type": "string",
"description": "Operation status.",
"enum": [
"Pending",
"Processing",
"Succeeded",
"Failed"
]
},
"resourceId": {
"type": "string",
"description": "Resource id.",
"format": "uuid"
},
"resourceVersion": {
"type": "string",
"description": "Resource version."
},
"createdAt": {
"type": "string",
"description": "Operation creation time."
},
"lastUpdatedAt": {
"type": "string",
"description": "Operation last update time."
},
"events": {
"type": "array",
"description": "Operation event.",
"items": {"$ref": "#/$defs/event" }
},
"etag": {
"type": "string",
"description": "ETag."
}
},
"required": [
"id",
"operationType",
"status",
"resourceId",
"resourceVersion",
"createdAt",
"lastUpdatedAt",
"events",
"etag"
],
"additionalProperties": false,
"$defs": {
"event": {
"type": "object",
"properties": {
"timeStamp": {
"type": "string",
"description": "Event timestamp."
},
"description": {
"type": "string",
"description": "Event message."
},
"from": {
"type": "string",
"description": "Event originator."
},
"severity": {
"type": "string",
"description": "Event classification.",
"enum": [
"low",
"medium",
"high",
"critical"
]
},
"eventType": {
"type": "string",
"description": "Event type."
}
},
"required": [
"timeStamp",
"message",
"from",
"severity",
"eventType"
]
}
}
}
|
AzureTRE/docs/schemas/operation.json/0
|
{
"file_path": "AzureTRE/docs/schemas/operation.json",
"repo_id": "AzureTRE",
"token_count": 1813
}
| 113 |
# Configuring Shared Services
In general, a shared service should be installed by using the UI or API directly once its bundle has been registered on the system.
As part of this quickstart, you will need to install the Nexus shared service, as you will subsequently be deploying Guacamole VMs that depend on public package repositories to bootstrap. Because the TRE's firewall blocks public access, Nexus will proxy these package repositories and make them available within the TRE for the VMs to consume.
## Deploy & configure Nexus service
!!! caution
Before deploying the Nexus service, you will need workspaces of version `0.3.2` or above due to a dependency on a DNS zone link for the workspace(s) to connect to the Nexus VM.
Before deploying the Nexus shared service, you need to make sure that it will have access to a certificate to configure serving secure proxies. By default, the Nexus service will serve proxies from `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/`, and thus it requires a certificate that validates ownership of this domain to use for SSL.
You can use the Certs Shared Service to set one up by following these steps:
1. Run the below command in your terminal to build, publish and register the certs bundle:
```cmd
make shared_service_bundle BUNDLE=certs
```
2. Navigate to the TRE UI, click on Shared Services in the navigation menu and click *Create new*.
3. Select the Certs template, then fill in the required details. *Domain prefix* should be set to `nexus` and *Cert name* should be `nexus-ssl`.
!!! caution
If you have KeyVault Purge Protection enabled and are re-deploying your environment using the same `cert_name`, you may encounter this: `Status=409 Code=\"Conflict\" Message=\"Certificate nexus-ssl is currently in a deleted but recoverable state`. You need to either manually recover the certificate or purge it before redeploying.
Once deployed, the certs service will use Let's Encrypt to generate a certificate for the specified domain prefix followed by `-{TRE_ID}.{LOCATION}.cloudapp.azure.com`, so in our case, having entered `nexus`, this will be `nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com`, which will be the public domain for our Nexus service.
You can verify whether this has been successful by navigating to your core key vault (`kv-{TRE_ID}`) and looking for a certificate called `nexus-ssl` (or whatever you called it).
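You can also check from the command line with the Azure CLI (assuming the default certificate name and the core key vault naming convention above):
```bash
# Shows the certificate's attributes, including its expiry date
az keyvault certificate show --vault-name "kv-{TRE_ID}" --name nexus-ssl --query "attributes"
```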
After verifying the certificate has been generated, you can deploy Nexus:
1. Run the below command in your terminal to build, publish and register the Nexus shared service bundle:
```cmd
make shared_service_bundle BUNDLE=sonatype-nexus-vm
```
1. Navigate back to the TRE UI, and click *Create new* again within the Shared Services page.
1. Select the Nexus template then fill in the required details. The *SSL certificate name* should default to `nexus-ssl`, so there's no need to change it unless you gave it a different name in the previous step.
This will deploy the infrastructure required for Nexus, then start the service and configure it with the repository configurations located in the `./templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config` folder. It will also set up HTTPS using the certificate you generated in the previous section, so proxies can be served at `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com`.
You can optionally go to the Nexus web interface by visiting `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/` in the jumpbox and signing in with the username `admin` and the password secret located in your core keyvault, with the key `nexus-admin-password`. Here you should be able to see all of the configured repositories and you can use the UI to manage settings etc.
Just bear in mind that if this service is redeployed any changes made in the Nexus UI won't be persisted. If you wish to permanently add new repositories or alter existing ones, modify the JSON files within the `./nexus_repos_config` directory and redeploy.
You can view further information on the Nexus shared service [here](../../tre-templates/shared-services/nexus.md).
## (Optional) Install Gitea and configure repositories
Gitea allows you to mirror Git repositories to make them available within the TRE. This is optional as part of this quickstart.
Note: This is a Gitea *shared service*, accessible from all workspaces and intended for mirroring external Git repositories. A Gitea *workspace service* can also be deployed per workspace to enable Gitea to be used within a specific workspace.
To install Gitea, first register the template:
```cmd
make shared_service_bundle BUNDLE=gitea
```
You can then install via the TRE UI in the same way you did for the Nexus bundle.
By default, this Gitea instance does not have any repositories configured. You can add repositories to Gitea either by using the command line or by using the Gitea web interface.
### Command Line
Make sure you run the following commands using Git Bash, with your current directory set to C:/AzureTRE.
1. On the jumpbox, run:
```./templates/workspace_services/gitea/gitea_migrate_repo.sh -t <tre_id> -g <URL_of_github_repo_to_migrate>```
1. If the token doesn't work or you have issues with it, you can reset it by setting its value to null in Key Vault:
```az keyvault secret set --name gitea-<tre-id>-admin-token --vault-name kv-<tre-id> --value null```
### Gitea Web Interface
1. On the jumpbox, open Edge and go to:
```https://gitea-<TRE_ID>.azurewebsites.net/```
1. Authenticate yourself using the username ```giteaadmin``` and the secret ```<gitea-TRE_ID-administrator-password>``` stored in the key vault.
1. Add the repository of your choice.
### Verify access to the mirrored repository
From a virtual machine within a workspace:
- Command line: ```git clone https://gitea-<TRE_ID>.azurewebsites.net/giteaadmin/<NameOfrepository>```
- Gitea Web Interface: ```https://gitea-<TRE_ID>.azurewebsites.net/```
More info on the Gitea shared service is available [here](../../tre-templates/shared-services/gitea.md).
## Next steps
* [Install Base Workspace](installing-base-workspace.md)
|
AzureTRE/docs/tre-admins/setup-instructions/configuring-shared-services.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/setup-instructions/configuring-shared-services.md",
"repo_id": "AzureTRE",
"token_count": 1643
}
| 114 |
# Upgrading AzureTRE version
This document will cover how Azure TRE is referenced and how to upgrade its version in the [Azure TRE deployment repository](https://github.com/microsoft/AzureTRE-Deployment)
## Introduction
Azure TRE is referenced as an external folder in the [Azure TRE deployment repository](https://github.com/Microsoft/AzureTRE-Deployment) (which is used as a template for your project in the quick start guide). A specific version of Azure TRE is downloaded as part of the devcontainer setup.
A symlink is then created, making it available for reference in the directory itself (it is available only for reference; any changes to it are gitignored).
## How to upgrade the Azure TRE version
> Please check the release notes before upgrading.
- If you are using the [Azure TRE deployment repository](https://github.com/microsoft/AzureTRE-Deployment) directly (not a repository created from the template), you need to git pull the latest version.
- If using a repository created from the `AzureTRE-Deployment` template, then run the following git commands in your own repo:
```sh
git remote add upstream https://github.com/Microsoft/AzureTRE-Deployment
git pull upstream main --allow-unrelated-histories
```
This will pull the latest version of AzureTRE to your copy of the repository. You may need to resolve merge conflicts if you have made edits.
The `git remote add` command is only necessary the first time you upgrade your TRE version. After the first time, you only need to execute the `git pull` command.
Once the code is merged, follow the same process used to initially deploy the TRE to upgrade the TRE. This might mean running a command such as `make all`, or running your CI/CD pipeline(s).
> If running commands manually, please ensure that you build and push the container images. Running `make tre-deploy` alone will update the infrastructure but not the container images. `make all` runs all the required commands.
## Deploying a specific version of Azure TRE
If you wish to upgrade or deploy a specific version, or unreleased version of Azure TRE and are using the [Azure TRE deployment repository](https://github.com/Microsoft/AzureTRE-Deployment) you can change the value of `OSS_VERSION` in `.devcontainer/devcontainer.json`, for example:
- `"OSS_VERSION": "v0.9.0"` (to use the specified tag; be sure to specify the complete tag name (prefixed with `v` and not the release name)
- `"OSS_VERSION": "main"` (to use the latest code in the "main" branch)
- `"OSS_VERSION": "1c6ff35ec9246e53b86e93b9da5b97911edc71c1"` (to use the code at the time of the commit identified by the hash)
## Deploying a fork of Azure TRE
If you wish to deploy the Azure TRE from a forked repository you can change the value of `OSS_REPO` in `.devcontainer/devcontainer.json`, for example:
- `"OSS_REPO": "microsoft/AzureTRE"` (the default)
- `"OSS_REPO": "myorg/AzureTRE"` (to point to fork of the Azure TRE in your GitHub organisation)
When changing `OSS_REPO` ensure the `OSS_VERSION` variable refers to a GitHub ref on the repository fork.
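For example, a repository created from the template might carry these settings in `.devcontainer/devcontainer.json` (an abbreviated sketch; the surrounding structure depends on your version of the deployment repo):
```json
// Excerpt only - devcontainer.json permits comments; the build args are passed to the devcontainer image build
"build": {
    "args": {
        "OSS_REPO": "myorg/AzureTRE",
        "OSS_VERSION": "v0.9.0"
    }
}
```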
|
AzureTRE/docs/tre-admins/upgrading-tre.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/upgrading-tre.md",
"repo_id": "AzureTRE",
"token_count": 801
}
| 115 |
# Nexus Shared Service
Sonatype Nexus (RepoManager) allows users in workspaces to access external software packages securely.
Documentation on Nexus can be found here: [https://help.sonatype.com/repomanager3/](https://help.sonatype.com/repomanager3/).
## Deploy
!!! caution
Before deploying the Nexus service, you will need workspaces of version `0.3.2` or above due to a dependency on a DNS zone link for the workspace(s) to connect to the Nexus VM.
Before deploying the Nexus shared service, you need to make sure that it will have access to a certificate to configure serving secure proxies. By default, the Nexus service will serve proxies from `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/`, and thus it requires a certificate that validates ownership of this domain to use for SSL.
You can use the Certs Shared Service to set one up by following these steps:
1. Run the below command in your terminal to build, publish and register the certs bundle:
```cmd
make shared_service_bundle BUNDLE=certs
```
2. Navigate to the TRE UI, click on Shared Services in the navigation menu and click *Create new*.
3. Select the Certs template, then fill in the required details. *Domain prefix* should be set to `nexus` and *Cert name* should be `nexus-ssl`.
!!! caution
If you have Key Vault Purge Protection enabled and are re-deploying your environment using the same `cert_name`, you may encounter this: `Status=409 Code=\"Conflict\" Message=\"Certificate nexus-ssl is currently in a deleted but recoverable state`. You need to either manually recover the certificate or purge it before redeploying.
Once deployed, the certs service will use Let's Encrypt to generate a certificate for the specified domain prefix followed by `-{TRE_ID}.{LOCATION}.cloudapp.azure.com`, so in our case, having entered `nexus`, this will be `nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com`, which will be the public domain for our Nexus service.
You can verify whether this has been successful by navigating to your core Key Vault (`kv-{TRE_ID}`) and looking for a certificate called `nexus-ssl` (or whatever you called it).
After verifying the certificate has been generated, you can deploy Nexus:
1. Run the below command in your terminal to build, publish and register the Nexus shared service bundle:
```cmd
make shared_service_bundle BUNDLE=sonatype-nexus-vm
```
1. Navigate back to the TRE UI, and click *Create new* again within the Shared Services page.
1. Select the Nexus template, then fill in the required details. The *SSL certificate name* should default to `nexus-ssl`, so there's no need to change it unless you gave it a different name in the previous step.
This will deploy the infrastructure required for Nexus, then start the service and configure it with the repository configurations located in the `./templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config` folder. It will also set up HTTPS using the certificate you generated in the previous section, so proxies can be served at `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com`.
## Setup and usage
1. A TRE Administrator can access Nexus through the admin jumpbox provisioned as part of the TRE deployment. The username is `adminuser` and the password is located in the Key Vault under `vm-<tre-id>-jumpbox-password`.
2. A researcher can access Nexus from within the workspace by using the internal Nexus URL of `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com`
3. To fetch Python packages from the PyPI proxy, a researcher can use `pip install` while specifying the proxy server:
```bash
pip install packagename --index-url https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/pypi/simple
```
!!! info
In the built-in Linux and Windows Guacamole VM bundles, PyPI and several other package managers are already configured to use the Nexus proxy by default, so manually specifying in the install commands isn't necessary.
## Network requirements
Nexus Shared Service requires access to resources outside of the Azure TRE VNET. These are set as part of the firewall provisioning pipeline via explicit allow on [Service Tags](https://docs.microsoft.com/en-us/azure/virtual-network/service-tags-overview) or URLs.
| Service Tag / Destination | Justification |
| --- | --- |
| AzureActiveDirectory | Authorize the signed in user against Microsoft Entra ID. |
| AzureContainerRegistry | Pull the Nexus container image, as it is located in Azure Container Registry. |
| pypi.org, *.pypi.org | Enables Nexus to "proxy" python packages to use inside of workspaces. |
| repo.anaconda.com | Enables Nexus to "proxy" conda packages to use inside of workspaces. |
| conda.anaconda.org | Enables Nexus to "proxy" additional conda packages to use inside of workspaces such as conda-forge. |
| *.docker.com | Enables Nexus to "proxy" docker repos to use inside of workspaces. |
| *.docker.io | Enables Nexus to "proxy" docker repos to use inside of workspaces. |
| archive.ubuntu.com | Enables Nexus to "proxy" apt packages to use inside of workspaces. |
| security.ubuntu.com | Enables Nexus to "proxy" apt packages to use inside of workspaces. |
## Current Repos
| Name | Type | Source URI | Nexus URI | Usage |
| --- | --- | --- | --- | --- |
| PyPI | PyPI | [https://pypi.org/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/pypi/` | Allow use of pip commands. |
| Conda | conda | [https://repo.anaconda.com/pkgs] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/conda-repo/` | Configure conda to have access to default conda packages. |
| Conda Mirror | conda | [https://conda.anaconda.org] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/conda-mirror/` | Configure conda to have access to conda mirror packages. |
| Docker | apt | [https://download.docker.com/linux/ubuntu/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/docker/` | Install Docker via apt on Linux systems. |
| Docker GPG | raw | [https://download.docker.com/linux/ubuntu/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/docker-public-key/` | Provide public key to sign apt source for above Docker apt. |
| Docker Hub | docker | [https://registry-1.docker.io] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/docker-hub/` | Provide docker access to public images repo. |
| Ubuntu Packages | apt | [http://archive.ubuntu.com/ubuntu/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/ubuntu/` | Provide access to Ubuntu apt packages on Ubuntu systems. |
| Ubuntu Security Packages | apt | [http://security.ubuntu.com/ubuntu/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/ubuntu-security/` | Provide access to Ubuntu Security apt packages on Ubuntu systems. |
| Almalinux | yum | [https://repo.almalinux.org] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/almalinux` | Install Almalinux packages |
| R-Proxy | r | [https://cran.r-project.org/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/r-proxy` | Provide access to CRAN packages for R |
| Fedora Project | yum | [https://download-ib01.fedoraproject.org] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/fedoraproject` | Install Fedora Project Linux packages |
| Microsoft Apt | apt | [https://packages.microsoft.com] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/microsoft-apt` | Provide access to Microsoft Apt packages |
| Microsoft Keys | raw | [https://packages.microsoft.com/keys/] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/microsoft-keys` | Provide access to Microsoft keys |
| Microsoft Yum | yum | [https://packages.microsoft.com/yumrepos] | `https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/microsoft-yum` | Provide access to Microsoft Yum packages |
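For example, to point conda at the proxied channels from the table above instead of the public defaults (a sketch only; the exact channel paths may differ in your configuration):
```bash
# Illustrative: swap the default channels for the Nexus conda proxies
conda config --remove channels defaults
conda config --add channels https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/conda-repo/main
conda config --add channels https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/conda-mirror/conda-forge
```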
### Migrate from an existing V1 Nexus service (hosted on App Service)
If you still have an existing Nexus installation based on App Service (from the original V1 bundle), you can migrate to the VM-based Nexus service by following these steps:
1. Install the new Nexus service alongside your old installation using the steps from earlier in this document.
1. Identify any existing Guacamole user resources that are using the old proxy URL (`https://nexus-{TRE_ID}.azurewebsites.net/`). These will be any VMs with bundle versions < `0.3.2` that haven't been manually updated.
1. These will need to be either **re-deployed** with the new template versions (`0.3.2` or later), specifying an additional template parameter `"nexus_version"` with the value of `"V2"`, or manually have their proxy URLs updated by remoting into the VMs and updating the configuration files of the required package managers with the new URL (`https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/`).
1. For example, pip will need the `index`, `index-url` and `trusted-host` values in the global `pip.conf` file to be modified to use the new URL (see the sketch after this list).
2. Once you've confirmed there are no dependencies on the old Nexus shared service, you can delete it using the API or UI.
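As a sketch of the pip change mentioned above (the repository paths assume the default Nexus repository names, not a config generated by the template):
```ini
# Illustrative pip.conf - adjust the paths to match your Nexus repositories
[global]
index = https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/pypi/pypi
index-url = https://nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com/repository/pypi/simple
trusted-host = nexus-{TRE_ID}.{LOCATION}.cloudapp.azure.com
```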
## Renewing certificates for Nexus
The Nexus service checks Key Vault regularly for the latest certificate matching the name you passed on deploy (`nexus-ssl` by default).
When approaching expiry, you can either provide an updated certificate into the TRE core Key Vault (with the name you specified when installing Nexus) if you brought your own, or, if you used the certs shared service to generate one, just call the `renew` custom action on that service. This will generate a new certificate and persist it to the Key Vault, replacing the expired one.
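If you brought your own certificate, the renewed one can be uploaded with the Azure CLI, for example (names assume the defaults used in this guide; the `.pfx` path is illustrative):
```bash
# Replace the expiring certificate in the TRE core key vault
az keyvault certificate import --vault-name "kv-{TRE_ID}" --name nexus-ssl --file ./nexus-ssl.pfx
```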
|
AzureTRE/docs/tre-templates/shared-services/nexus.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/shared-services/nexus.md",
"repo_id": "AzureTRE",
"token_count": 2748
}
| 116 |
# OHDSI Workspace Service
!!! warning
- This workspace service does not work "out of the box". It requires additional networking configuration to work properly. See the [networking configuration](#networking-configuration) section for more details.
- Currently the only CDM data source supported by the workspace service is Azure Synapse.
See the [official OHDSI website](https://www.ohdsi.org/) and [The Book of OHDSI](https://ohdsi.github.io/TheBookOfOhdsi/).
This service installs the following resources into an existing virtual network within the workspace:

## Networking configuration
Deploying the OHDSI workspace service is not enough for it to function properly; the following networking configuration must also be in place:
### 1. The resource processor should be able to access the CDM data source
Multiple OHDSI workspace services cannot share the same RESULTS and TEMP schemas because each OHDSI instance is changing the schemas, which could potentially cause conflicts.
To avoid this, every workspace service must work on its own schemas. To do this, we use golden copying.
This means that the "main" schemas remain untouched, and every workspace service has its own copy of the RESULTS and TEMP schemas, in the CDM data source, which it can modify.
Since the resource processor is in charge of duplicating the schemas, the CDM data source has to be accessible from the resource processor's VNet in order to be able to create them.
### 2. The workspace should be able to access the CDM data source
In order to access the CDM from ATLAS, the CDM data source should be accessible from the workspace's VNet.
Since the CDM data source is outside of the TRE, this is not part of the template. There are many ways in which this can be done;
one example would be to deploy a private endpoint for the CDM data source in the workspace's VNet as part of a custom workspace template, as sketched below.
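A minimal sketch of that approach (the subnet data source and the `synapse_workspace_id` variable are illustrative assumptions, not part of the OHDSI template):
```hcl
# Sketch: a private endpoint exposing the Synapse CDM data source inside the workspace VNet
resource "azurerm_private_endpoint" "cdm" {
  name                = "pe-cdm-${var.tre_id}"
  location            = data.azurerm_resource_group.ws.location
  resource_group_name = data.azurerm_resource_group.ws.name
  subnet_id           = data.azurerm_subnet.services.id # assumed workspace services subnet

  private_service_connection {
    name                           = "psc-cdm"
    private_connection_resource_id = var.synapse_workspace_id # assumed input variable
    subresource_names              = ["Sql"]
    is_manual_connection           = false
  }
}
```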
## Setting up a CDM data source
Currently the only CDM data source supported by the workspace service is Azure Synapse.
If you already have an OMOP CDM data source, then all you have to do is to configure the network as described in the [networking configuration](#networking-configuration) section.
If your data is in a different format, you can read [here](https://ohdsi.github.io/TheBookOfOhdsi/ExtractTransformLoad.html) how to set up an ETL process to convert your medical data to the OMOP format.
## Using a sample CDM data source
If you don't have any data yet, or if you just want a quick start, you can deploy an Azure Synapse CDM data source with sample data using the [OHDSI on Azure](https://github.com/microsoft/OHDSIonAzure) repository.
When deploying set `OMOP CDM Database Type` to `Synapse Dedicated Pool` as per the [deployment guide](https://github.com/microsoft/OHDSIonAzure/blob/main/docs/DeploymentGuide.md#:~:text=OMOP%20CDM%20Database%20Type).
Note that you will need to provision a private endpoint into the Azure TRE workspace that connects to the SQL Dedicated Pool as described in the [networking configuration](#networking-configuration) section.
|
AzureTRE/docs/tre-templates/workspace-services/ohdsi.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/workspace-services/ohdsi.md",
"repo_id": "AzureTRE",
"token_count": 807
}
| 117 |
# Using the Azure TRE
This section contains information on how to use the Azure TRE.
|
AzureTRE/docs/using-tre/index.md/0
|
{
"file_path": "AzureTRE/docs/using-tre/index.md",
"repo_id": "AzureTRE",
"token_count": 21
}
| 118 |
import asyncio
from typing import List, Optional
from contextlib import asynccontextmanager
from httpx import AsyncClient, Timeout, Response
import logging
from starlette import status
from azure.identity import ClientSecretCredential, UsernamePasswordCredential
import config
from e2e_tests import cloud
LOGGER = logging.getLogger(__name__)
TIMEOUT = Timeout(10, read=30)
azlogger = logging.getLogger("azure")
azlogger.setLevel(logging.WARN)
class InstallFailedException(Exception):
pass
def read_workspace_id() -> str:
with open('workspace_id.txt', 'r') as f:
workspace_id = f.readline()
return workspace_id
def write_workspace_id(workspace_id: str) -> None:
with open('workspace_id.txt', 'w') as f:
f.write(workspace_id)
def get_auth_header(token: str) -> dict:
return {'Authorization': f'Bearer {token}'}
def get_full_endpoint(endpoint: str) -> str:
return f"{config.TRE_URL}{endpoint}"
@asynccontextmanager
async def get_template(template_name, endpoint, token, verify):
async with AsyncClient(verify=verify) as client:
auth_headers = get_auth_header(token)
full_endpoint = get_full_endpoint(endpoint)
response = await client.get(f"{full_endpoint}/{template_name}", headers=auth_headers, timeout=TIMEOUT)
yield response
async def get_shared_service_by_name(template_name: str, verify, token) -> Optional[dict]:
async with AsyncClient(verify=verify, timeout=TIMEOUT) as client:
full_endpoint = get_full_endpoint('/api/shared-services')
auth_headers = get_auth_header(token)
response = await client.get(full_endpoint, headers=auth_headers, timeout=TIMEOUT)
assert_status(response, [status.HTTP_200_OK], "Failed to get shared services")
LOGGER.info(f'RESPONSE: {response}')
shared_service_list = response.json()["sharedServices"]
# sort the list by most recently updated and pick the first one
shared_service_list.sort(reverse=True, key=lambda s: s['updatedWhen'])
matching_shared_service = None
for service in shared_service_list:
if service["templateName"] == template_name:
matching_shared_service = service
break
return matching_shared_service
async def check_aad_auth_redirect(endpoint, verify) -> None:
LOGGER.info(f"Checking AAD AuthN redirect on: {endpoint}")
terminal_http_status = [status.HTTP_200_OK,
status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN,
status.HTTP_302_FOUND
]
async with AsyncClient(verify=verify) as client:
while True:
try:
response = await client.get(url=endpoint, timeout=TIMEOUT)
LOGGER.info(f"Endpoint Response: {endpoint} {response}")
if response.status_code in terminal_http_status:
break
await asyncio.sleep(30)
except Exception:
LOGGER.exception("Generic execption in http request.")
assert_status(response, [status.HTTP_302_FOUND])
assert response.has_redirect_location
location = response.headers["Location"]
LOGGER.info(f"Returned redirect URL: {location}")
valid_redirection_contains = ["login", "microsoftonline", "oauth2", "authorize"]
assert all(word in location for word in valid_redirection_contains), "Redirect URL doesn't appear to be valid"
async def get_admin_token(verify) -> str:
scope_uri = f"api://{config.API_CLIENT_ID}"
return get_token(scope_uri, verify)
def get_token(scope_uri, verify) -> str:
if config.TEST_ACCOUNT_CLIENT_ID != "" and config.TEST_ACCOUNT_CLIENT_SECRET != "":
# Logging in as an Enterprise Application: Use Client Credentials flow
credential = ClientSecretCredential(config.AAD_TENANT_ID, config.TEST_ACCOUNT_CLIENT_ID, config.TEST_ACCOUNT_CLIENT_SECRET, connection_verify=verify, authority=cloud.get_aad_authority_fqdn())
token = credential.get_token(f'{scope_uri}/.default')
else:
# Logging in as a User: Use Resource Owner Password Credentials flow
credential = UsernamePasswordCredential(config.TEST_APP_ID, config.TEST_USER_NAME, config.TEST_USER_PASSWORD, connection_verify=verify, authority=cloud.get_aad_authority_fqdn(), tenant_id=config.AAD_TENANT_ID)
token = credential.get_token(f'{scope_uri}/user_impersonation')
return token.token
def assert_status(response: Response, expected_status: List[int] = [200], message_prefix: str = "Unexpected HTTP Status"):
assert response.status_code in expected_status, \
f"{message_prefix}. Expected: {expected_status}. Actual: {response.status_code}. Response text: {response.text}"
|
AzureTRE/e2e_tests/helpers.py/0
|
{
"file_path": "AzureTRE/e2e_tests/helpers.py",
"repo_id": "AzureTRE",
"token_count": 1913
}
| 119 |
import pytest
from httpx import AsyncClient
from starlette import status
import config
from helpers import assert_status, get_admin_token, get_auth_header, get_template
from resources import strings
pytestmark = pytest.mark.asyncio
workspace_service_templates = [
(strings.AZUREML_SERVICE),
(strings.GUACAMOLE_SERVICE),
(strings.GITEA_SERVICE)
]
@pytest.mark.smoke
@pytest.mark.parametrize("template_name", workspace_service_templates)
async def test_get_workspace_service_templates(template_name, verify) -> None:
async with AsyncClient(verify=verify) as client:
admin_token = await get_admin_token(verify)
response = await client.get(f"{config.TRE_URL}{strings.API_WORKSPACE_SERVICE_TEMPLATES}", headers=get_auth_header(admin_token))
template_names = [templates["name"] for templates in response.json()["templates"]]
assert (template_name in template_names), f"No {template_name} template found"
@pytest.mark.smoke
@pytest.mark.parametrize("template_name", workspace_service_templates)
async def test_get_workspace_service_template(template_name, verify) -> None:
admin_token = await get_admin_token(verify)
async with get_template(template_name, strings.API_WORKSPACE_SERVICE_TEMPLATES, admin_token, verify) as response:
assert_status(response, [status.HTTP_200_OK], f"Failed to GET {template_name}")
@pytest.mark.smoke
async def test_create_workspace_service_templates(verify) -> None:
async with AsyncClient(verify=verify) as client:
payload = {
"name": f"{strings.TEST_WORKSPACE_SERVICE_TEMPLATE}",
"version": "0.0.1",
"current": "true",
"json_schema": {
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
"type": "object",
"title": "DONOTUSE",
"description": "DO NOT USE",
"required": [],
"properties": {}
}
}
admin_token = await get_admin_token(verify)
response = await client.post(f"{config.TRE_URL}{strings.API_WORKSPACE_SERVICE_TEMPLATES}", headers=get_auth_header(admin_token), json=payload)
assert_status(response, [status.HTTP_201_CREATED, status.HTTP_409_CONFLICT], "Failed to create workspace service template")
|
AzureTRE/e2e_tests/test_workspace_service_templates.py/0
|
{
"file_path": "AzureTRE/e2e_tests/test_workspace_service_templates.py",
"repo_id": "AzureTRE",
"token_count": 1023
}
| 120 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
# Install Azure CLI
apt-get update
apt-get -y install ca-certificates curl apt-transport-https lsb-release gnupg
curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/microsoft.gpg > /dev/null
AZ_REPO="$(lsb_release -cs)"
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" | tee /etc/apt/sources.list.d/azure-cli.list
apt-get update
apt-get -y install azure-cli="${AZURE_CLI_VERSION}"
|
AzureTRE/resource_processor/scripts/azure-cli.sh/0
|
{
"file_path": "AzureTRE/resource_processor/scripts/azure-cli.sh",
"repo_id": "AzureTRE",
"token_count": 236
}
| 121 |
---
default-storage: mydb
default-secrets: mysecrets
storage:
- name: mydb
plugin: mongodb
config:
url: ${secret.porter-db-connection-string}
secrets:
- name: mysecrets
plugin: azure.keyvault
config:
vault-url: ${env.KEY_VAULT_URL}
|
AzureTRE/resource_processor/vmss_porter/config.yaml/0
|
{
"file_path": "AzureTRE/resource_processor/vmss_porter/config.yaml",
"repo_id": "AzureTRE",
"token_count": 115
}
| 122 |
# See https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Put files here that you don't want copied into your bundle's invocation image
.gitignore
**/.terraform/*
**/*_backend.tf
Dockerfile.tmpl
.env*
deploy.sh
destroy.sh
|
AzureTRE/templates/shared_services/airlock_notifier/.dockerignore/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/.dockerignore",
"repo_id": "AzureTRE",
"token_count": 77
}
| 123 |
output "airlock_notifier_logic_app_name" {
value = azurerm_logic_app_standard.logic_app.name
}
output "airlock_notifier_logic_app_resource_group_name" {
value = azurerm_logic_app_standard.logic_app.resource_group_name
}
|
AzureTRE/templates/shared_services/airlock_notifier/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 89
}
| 124 |
locals {
staticweb_storage_name = lower(replace("stwebcerts${var.tre_id}", "-", ""))
staticweb_backend_pool_name = "beap-certs-staticweb"
app_path_map_name = "upm-certs"
redirect_path_map_name = "upm-certs-redirect"
insecure_frontend_port_name = "feport-certs-insecure"
secure_frontend_port_name = "feport-certs-secure"
frontend_ip_configuration_name = "feip-certs-public"
staticweb_http_setting_name = "be-htst-certs-staticweb"
insecure_listener_name = "httplstn-certs-insecure"
secure_listener_name = "httplstn-certs-secure"
redirect_request_routing_rule_name = "rqrt-certs-redirect"
request_routing_rule_name = "rqrt-certs-application"
redirect_configuration_name = "rdrcfg-certs-tosecure"
tre_shared_service_tags = {
tre_id = var.tre_id
tre_shared_service_id = var.tre_resource_id
}
}
|
AzureTRE/templates/shared_services/certs/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/certs/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 383
}
| 125 |
output "connection_uri" {
value = "https://${azurerm_private_dns_zone.cyclecloud.name}"
}
|
AzureTRE/templates/shared_services/cyclecloud/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/cyclecloud/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 34
}
| 126 |
resource "azurerm_firewall_policy_rule_collection_group" "core" {
name = "rcg-core"
firewall_policy_id = azurerm_firewall_policy.root.id
priority = 500
network_rule_collection {
name = "nrc-general"
priority = 201
action = "Allow"
rule {
name = "time"
protocols = [
"UDP"
]
destination_addresses = [
"*"
]
destination_ports = [
"123"
]
source_addresses = [
"*"
]
}
}
network_rule_collection {
name = "nrc-resource-processor-subnet"
priority = 202
action = "Allow"
rule {
name = "azure-services"
protocols = [
"TCP"
]
destination_addresses = [
"AzureActiveDirectory",
"AzureResourceManager",
"AzureContainerRegistry",
"Storage",
"AzureKeyVault"
]
destination_ports = [
"443"
]
source_ip_groups = [data.azurerm_ip_group.resource_processor.id]
}
}
network_rule_collection {
name = "nrc-web-app-subnet"
priority = 203
action = "Allow"
rule {
name = "azure-services"
protocols = [
"TCP"
]
destination_addresses = [
"AzureActiveDirectory",
"AzureContainerRegistry",
"AzureResourceManager"
]
destination_ports = [
"443"
]
source_ip_groups = [data.azurerm_ip_group.web.id]
}
}
application_rule_collection {
name = "arc-resource-processor-subnet"
priority = 301
action = "Allow"
rule {
name = "os-package-sources"
protocols {
port = "443"
type = "Https"
}
protocols {
port = "80"
type = "Http"
}
destination_fqdns = [
"packages.microsoft.com",
"keyserver.ubuntu.com",
"api.snapcraft.io",
"azure.archive.ubuntu.com",
"security.ubuntu.com",
"entropy.ubuntu.com",
]
source_ip_groups = [data.azurerm_ip_group.resource_processor.id]
}
rule {
name = "docker-sources"
protocols {
port = "443"
type = "Https"
}
protocols {
port = "80"
type = "Http"
}
destination_fqdns = [
"download.docker.com",
"registry-1.docker.io",
"auth.docker.io",
]
source_ip_groups = [data.azurerm_ip_group.resource_processor.id]
}
# This rule is needed to support Gov Cloud.
# The az cli uses msal lib which requires access to this fqdn for authentication.
rule {
name = "microsoft-login"
protocols {
port = "443"
type = "Https"
}
destination_fqdns = [
"login.microsoftonline.com",
]
source_ip_groups = [data.azurerm_ip_group.resource_processor.id]
}
}
application_rule_collection {
name = "arc-shared-subnet"
priority = 302
action = "Allow"
rule {
name = "nexus-bootstrap"
protocols {
port = "443"
type = "Https"
}
protocols {
port = "80"
type = "Http"
}
destination_fqdns = [
"keyserver.ubuntu.com",
"packages.microsoft.com",
"download.docker.com",
"azure.archive.ubuntu.com"
]
source_ip_groups = [data.azurerm_ip_group.shared.id]
}
}
application_rule_collection {
name = "arc-web-app-subnet"
priority = 303
action = "Allow"
rule {
name = "microsoft-graph"
protocols {
port = "443"
type = "Https"
}
destination_fqdns = [
var.microsoft_graph_fqdn
]
source_ip_groups = [data.azurerm_ip_group.web.id]
}
}
application_rule_collection {
name = "arc-airlock-processor-subnet"
priority = 304
action = "Allow"
rule {
name = "functions-runtime"
protocols {
port = "443"
type = "Https"
}
destination_fqdns = [
"functionscdn.azureedge.net"
]
source_ip_groups = [data.azurerm_ip_group.airlock_processor.id]
}
}
depends_on = [
azurerm_firewall.fw
]
}
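# The collection groups below are built dynamically from API-driven rule definitions
# (local.api_driven_network_rule_collection and local.api_driven_application_rule_collection)
# supplied by TRE resources.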
resource "azurerm_firewall_policy_rule_collection_group" "dynamic_network" {
name = "rcg-dynamic-network"
firewall_policy_id = azurerm_firewall_policy.root.id
priority = 510
dynamic "network_rule_collection" {
for_each = { for i, v in local.api_driven_network_rule_collection : i => v }
content {
name = network_rule_collection.value.name
priority = 200 + network_rule_collection.key
action = "Allow"
dynamic "rule" {
for_each = network_rule_collection.value.rules
content {
name = rule.value.name
# description = rule.value.description
source_addresses = try(rule.value.source_addresses, [])
source_ip_groups = concat(
try(rule.value.source_ip_group_ids, []),
try([for item in rule.value.source_ip_groups_in_core : data.azurerm_ip_group.referenced[item].id], [])
)
destination_addresses = try(rule.value.destination_addresses, [])
destination_ip_groups = try(rule.value.destination_ip_group_ids, [])
destination_fqdns = try(rule.value.destination_fqdns, [])
destination_ports = try(rule.value.destination_ports, [])
protocols = try(rule.value.protocols, [])
}
}
}
}
depends_on = [
azurerm_firewall_policy_rule_collection_group.core
]
}
resource "azurerm_firewall_policy_rule_collection_group" "dynamic_application" {
name = "rcg-dynamic-application"
firewall_policy_id = azurerm_firewall_policy.root.id
priority = 520
dynamic "application_rule_collection" {
for_each = { for i, v in local.api_driven_application_rule_collection : i => v }
content {
name = application_rule_collection.value.name
priority = 200 + application_rule_collection.key
action = "Allow"
dynamic "rule" {
for_each = application_rule_collection.value.rules
content {
name = rule.value.name
description = rule.value.description
dynamic "protocols" {
for_each = rule.value.protocols
content {
port = protocols.value.port
type = protocols.value.type
}
}
destination_fqdns = try(rule.value.target_fqdns, [])
source_addresses = try(rule.value.source_addresses, [])
source_ip_groups = concat(
try(rule.value.source_ip_group_ids, []),
try([for item in rule.value.source_ip_groups_in_core : data.azurerm_ip_group.referenced[item].id], [])
)
destination_fqdn_tags = try(rule.value.fqdn_tags, [])
}
}
}
}
depends_on = [
azurerm_firewall_policy_rule_collection_group.dynamic_network
]
}
|
AzureTRE/templates/shared_services/firewall/terraform/rules.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/firewall/terraform/rules.tf",
"repo_id": "AzureTRE",
"token_count": 3358
}
| 127 |
{
"name": "r-proxy",
"online": true,
"storage": {
"blobStoreName": "default",
"strictContentTypeValidation": true,
"write_policy": "ALLOW"
},
"proxy": {
"remoteUrl": "https://cran.r-project.org/",
"contentMaxAge": 1440,
"metadataMaxAge": 1440
},
"negativeCache": {
"enabled": true,
"timeToLive": 1440
},
"httpClient": {
"blocked": false,
"autoBlock": false,
"connection": {
"retries": 0,
"userAgentSuffix": "string",
"timeout": 60,
"enableCircularRedirects": false,
"enableCookies": false,
"useTrustStore": false
}
},
"baseType": "r",
"repoType": "proxy"
}
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/cran_proxy_conf.json/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/cran_proxy_conf.json",
"repo_id": "AzureTRE",
"token_count": 292
}
| 128 |
#!/bin/bash
export TF_LOG=""
terraform init -input=false -backend=true -reconfigure -upgrade \
-backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name:?}" \
-backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name:?}" \
-backend-config="container_name=${TF_VAR_terraform_state_container_name:?}" \
-backend-config="key=${TRE_ID:?}-shared-service-sonatype-nexus"
terraform plan
terraform apply -auto-approve
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 182
}
| 129 |
data "azurerm_resource_group" "ws" {
name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_virtual_network" "ws" {
name = "vnet-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
resource "azurerm_application_insights" "ai" {
name = "ai-${local.service_resource_name_suffix}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
application_type = "web"
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
data "azurerm_key_vault" "ws" {
name = local.keyvault_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_subnet" "shared" {
resource_group_name = local.core_resource_group_name
virtual_network_name = local.core_vnet
name = "SharedSubnet"
}
data "azurerm_route_table" "rt" {
name = "rt-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "azureml" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.api.azureml.ms"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "azuremlcert" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.cert.api.azureml.ms"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "notebooks" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.notebooks.azure.net"]
resource_group_name = local.core_resource_group_name
}
|
AzureTRE/templates/workspace_services/azureml/terraform/data.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 756
}
| 130 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/custom_parameters.json",
"type": "object",
"title": "Azure Machine Learning Compute Instance",
"description": "An Azure Machine Learning compute instance is a managed cloud-based workstation for data scientists. Each compute instance has only one owner, although you can share files between multiple compute instances.",
"required": [],
"properties": {
"display_name": {
"type": "string",
"title": "Name for the user resource",
"description": "The name of the user resource to be displayed to users",
"default": "Compute Instance",
"updateable": true
},
"description": {
"type": "string",
"title": "Description of the user resource",
"description": "Description of the user resource",
"default": "AML Compute Instance",
"updateable": true
},
"overview": {
"type": "string",
"title": "User Resource Overview",
"description": "Long form description of the user resource, in markdown syntax",
"default": "An Azure Machine Learning compute instance is a managed cloud-based workstation for data scientists. Each compute instance has only one owner, although you can share files between multiple compute instances.\n- [Azure Machine Learning Compute Instance](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-instance)",
"updateable": true
},
"vm_size": {
"type": "string",
"title": "Virtual Machine Size",
"description": "The size of the virtual machine to be created.",
"enum": [
"Standard_D2_v3",
"Standard_D4_v3",
"Standard_D8_v3",
"Standard_D16_v3"
],
"default": "Standard_D2_v3"
},
"user_object_id": {
"type": "string",
"title": "Azure Active Directory User Object ID",
"description": "The Azure Active Directory user object ID of User who will be using the compute instance",
"default": "",
"minLength": 1
}
}
}
|
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 750
}
| 131 |
data "azurerm_resource_group" "ws" {
name = local.resource_group_name
}
data "azurerm_virtual_network" "ws" {
name = local.virtual_network_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_firewall" "firewall" {
name = local.firewall_name
resource_group_name = local.core_resource_group_name
}
data "azurerm_subnet" "services" {
name = "ServicesSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_virtual_network.ws.resource_group_name
}
data "azurerm_private_dns_zone" "databricks" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azuredatabricks.net"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_subscription" "current" {}
data "azurerm_private_dns_zone" "dfscore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.dfs.core.windows.net"]
resource_group_name = local.core_resource_group_name
}
|
AzureTRE/templates/workspace_services/databricks/terraform/data.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/databricks/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 434
}
| 132 |
{
"schemaType": "ParameterSet",
"schemaVersion": "1.0.1",
"namespace": "",
"name": "tre-workspace-service-gitea",
"parameters": [
{
"name": "id",
"source": {
"env": "ID"
}
},
{
"name": "workspace_id",
"source": {
"env": "WORKSPACE_ID"
}
},
{
"name": "tre_id",
"source": {
"env": "TRE_ID"
}
},
{
"name": "mgmt_acr_name",
"source": {
"env": "ACR_NAME"
}
},
{
"name": "mgmt_resource_group_name",
"source": {
"env": "MGMT_RESOURCE_GROUP_NAME"
}
},
{
"name": "tfstate_container_name",
"source": {
"env": "TERRAFORM_STATE_CONTAINER_NAME"
}
},
{
"name": "tfstate_resource_group_name",
"source": {
"env": "MGMT_RESOURCE_GROUP_NAME"
}
},
{
"name": "tfstate_storage_account_name",
"source": {
"env": "MGMT_STORAGE_ACCOUNT_NAME"
}
},
{
"name": "aad_authority_url",
"source": {
"env": "AAD_AUTHORITY_URL"
}
},
{
"name": "arm_environment",
"source": {
"env": "ARM_ENVIRONMENT"
}
},
{
"name": "sql_sku",
"source": {
"env": "SQL_SKU"
}
}
]
}
|
AzureTRE/templates/workspace_services/gitea/parameters.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/parameters.json",
"repo_id": "AzureTRE",
"token_count": 758
}
| 133 |
# GUID to identify the workspace service
ID=__CHANGE_ME__
TRE_RESOURCE_ID=__SAME_AS_ID__
# GUID to identify the workspace bundle
WORKSPACE_ID="__CHANGE_ME__"
# Guacamole image tag to use (version in templates/workspace_services/guacamole/version.txt)
GUACAMOLE_IMAGE_TAG="__CHANGE_ME__"
MGMT_ACR_NAME="__CHANGE_ME__"
ARM_USE_MSI=false
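# Guacamole session restrictions: clipboard, drive redirection and file transfer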
GUAC_DISABLE_COPY=true
GUAC_DISABLE_PASTE=false
GUAC_ENABLE_DRIVE=true
GUAC_DRIVE_NAME="transfer"
GUAC_DRIVE_PATH="/guac-transfer"
GUAC_DISABLE_DOWNLOAD=true
GUAC_DISABLE_UPLOAD=true
IS_EXPOSED_EXTERNALLY=false
image_name="guac-server"
image_tag=""
|
AzureTRE/templates/workspace_services/guacamole/.env.sample/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/.env.sample",
"repo_id": "AzureTRE",
"token_count": 244
}
| 134 |
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.guacamole.auth.azuretre;
import com.auth0.jwk.Jwk;
import com.auth0.jwk.UrlJwkProvider;
import com.auth0.jwt.JWT;
import com.auth0.jwt.algorithms.Algorithm;
import org.apache.guacamole.net.auth.credentials.GuacamoleInvalidCredentialsException;
import org.hamcrest.CoreMatchers;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.EnvironmentVariables;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.interfaces.RSAPrivateKey;
import java.security.interfaces.RSAPublicKey;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;
import java.util.Base64;
import java.util.Calendar;
import java.util.Date;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AuthenticationProviderServiceTest {
@Rule public final EnvironmentVariables environmentVariables = new EnvironmentVariables();
String audience = "dummy_audience";
String issuer = "dummy_issuer";
private static PublicKey getPublicKey() throws NoSuchAlgorithmException, InvalidKeySpecException {
// openssl rsa -in private.pem -outform PEM -pubout -out public.pem
String rsaPublicKey =
"-----BEGIN PUBLIC KEY-----"
+ "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFfaNjrzA2gSHzzXPZnY"
+ "lGCWYuzU8hKJchNqnuSQW+j06Mnp8YTEDp1pP50SQmye1i0hQdE8hb8PC8O7C9NN"
+ "pYbrWoxQRJnhV/mdgfESaWrngsFr6KPVGzUWssEtF1uLHv6Y5SkhXpjHgx6+NKhL"
+ "0iWnsEDg9aj1viTq6VXAsqfsOjGTVjaz/TnSUSzgAjor/7QbUk+6gZUWiU5nq3qJ"
+ "NpF6KRAgfcvOTFO7bN9piUt19gMaPMHW9PGTwXO1SywUMCLnyhTGPVqTm/nW8Tj2"
+ "j+51l6yo1ARFTjdcwstYVIKby0LFeUQEZYfaEFHN78N1ztGSuHuH+sxyEjuWn0J1"
+ "oQIDAQAB"
+ "-----END PUBLIC KEY-----";
rsaPublicKey = rsaPublicKey.replace("-----BEGIN PUBLIC KEY-----", "");
rsaPublicKey = rsaPublicKey.replace("-----END PUBLIC KEY-----", "");
final X509EncodedKeySpec keySpec =
new X509EncodedKeySpec(Base64.getDecoder().decode(rsaPublicKey));
final KeyFactory kf = KeyFactory.getInstance("RSA");
final PublicKey publicKey = kf.generatePublic(keySpec);
return publicKey;
}
private static PrivateKey getPrivateKey()
throws NoSuchAlgorithmException, InvalidKeySpecException {
// openssl genrsa -out private.pem 2048
// openssl pkcs8 -topk8 -inform PEM -outform DER -in private.pem -out
// private.der -nocrypt
String rsaPrivateKey =
"-----BEGIN PRIVATE KEY-----"
+ "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC0V9o2OvMDaBIf"
+ "PNc9mdiUYJZi7NTyEolyE2qe5JBb6PToyenxhMQOnWk/nRJCbJ7WLSFB0TyFvw8L"
+ "w7sL002lhutajFBEmeFX+Z2B8RJpaueCwWvoo9UbNRaywS0XW4se/pjlKSFemMeD"
+ "Hr40qEvSJaewQOD1qPW+JOrpVcCyp+w6MZNWNrP9OdJRLOACOiv/tBtST7qBlRaJ"
+ "Tmereok2kXopECB9y85MU7ts32mJS3X2Axo8wdb08ZPBc7VLLBQwIufKFMY9WpOb"
+ "+dbxOPaP7nWXrKjUBEVON1zCy1hUgpvLQsV5RARlh9oQUc3vw3XO0ZK4e4f6zHIS"
+ "O5afQnWhAgMBAAECggEAWhZwuRplftQkCVq5ItqRaD1/olcwYOSFqGiuoEUJIACV"
+ "JxyGMtHhpnNXuiFal7fu+Ip+zIQbOayhdX0HGPcrGH73Xros9rfv66ST2+9zBRoU"
+ "ICtDHmmI8RhqCE2bmsluC8Oe2Qrc0oZ7U7KtzVws1ANfaxpdxhnq+Fs0xe7CXfvQ"
+ "pQ9cu2L0BQ9ilh4ijfekOl/83sdydZ9FkpohIaQEejQhFP9HaMr+RMEcEUbRLERM"
+ "oLzdRsmTDOlKjsJes+LkjxhYzrk6DpBwmyfjI6PYVSzFPgcHY9rqpkXUUoAK5wQf"
+ "KbT40S8cdTaJoALwTgXIRWLjW1zWIe4fmtxs6KCnqQKBgQDuJJ98PoXsZiiIcg9n"
+ "4sSVUHEJB4HpVOih9u9kGxCobQKO+iYkRyyixJci+WIzpD8AMn3IPtgdBGKomTS6"
+ "NwSAIH0Y7JITzsWy9rfUj0L1sB0JEm/uXeeRmxVm+MG5IhBMHMo2LXi8p1v401bn"
+ "2K2NZcqnlA+g+GAvW26n0FIgCwKBgQDB3bT7r8UNh27Qf6mnomPiJOo2+d42ikrv"
+ "U2TZdwGgHfV8psFZ4G4p691OeWDSV4xZ8u1yXjKXV3pbZnO0gxpkFG0dxd0WDllU"
+ "WS8xYewGlx4trOl8Hbtf4RvHJnKM/A+EKx2A6BwPZFDUONTMhywBAHQXPkGSGP0J"
+ "k3CVn+4wgwKBgQCmNb9uawDz3tVZbipceoR0JmHOSIQeg5meOYgZ2V4F/5dyjRsD"
+ "5P09WXKXAXHN00v5akQp99rEXeQyAkQv1+h3OLW3KJ5H3uBTKSli3N6CNfn98/VV"
+ "bAsMsC3+4Y3sFd9EEC/+IjyLh0+E2pRkWvG+p5YK4icKVXBkfS89RwOawwKBgF8i"
+ "zqrocdoWyTG2RGpITZ3voaSC8CJxsR5LHWV+eiS6LvsR1jal5UnbPopBFFuErRKD"
+ "HTUPtuIAAsKRv1wpLi1IvNdsfvdQ6VN0RK2GMU52oE+n2BiZepctn/UWEAbRt0eT"
+ "5PGadhKzltreXMdV2ilPsKirW4A3lQ069nfmuPvDAoGAO0APMgRCFiympXisWEcQ"
+ "Q/U7ZbuYPSBaylBds9GcMoqjlffoxISr20w0iKXokO2DoYmTTeUtjIdfHTt6OIgK"
+ "+KnwY1Wo7yTAtR3Rt1PEPHncSNkRYD7EAIjH7m4EF64awF4ki+34Kfc0/SYxoo2N"
+ "1A3YhlsQ9cHSWJ2/zjavu/0="
+ "-----END PRIVATE KEY-----";
rsaPrivateKey = rsaPrivateKey.replace("-----BEGIN PRIVATE KEY-----", "");
rsaPrivateKey = rsaPrivateKey.replace("-----END PRIVATE KEY-----", "");
final PKCS8EncodedKeySpec keySpec =
new PKCS8EncodedKeySpec(Base64.getDecoder().decode(rsaPrivateKey));
final KeyFactory kf = KeyFactory.getInstance("RSA");
final PrivateKey privKey = kf.generatePrivate(keySpec);
return privKey;
}
private String generateNoRolesJWTToken()
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
final Algorithm algorithm =
Algorithm.RSA256((RSAPublicKey) getPublicKey(), (RSAPrivateKey) getPrivateKey());
final Calendar c = Calendar.getInstance();
final Date currentDate = c.getTime();
c.add(Calendar.HOUR, 24);
final Date expireDate = c.getTime();
final String jwtToken =
JWT.create()
.withIssuer(issuer)
.withKeyId("dummy_keyid")
.withAudience(audience)
.withIssuedAt(currentDate)
.withExpiresAt(expireDate)
.withClaim("oid", "dummy_oid")
.withClaim("preferred_username", "dummy_preferred_username")
.sign(algorithm);
return jwtToken;
}
private String generateEmptyRolesJWTToken()
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
final Algorithm algorithm =
Algorithm.RSA256((RSAPublicKey) getPublicKey(), (RSAPrivateKey) getPrivateKey());
final Calendar c = Calendar.getInstance();
final Date currentDate = c.getTime();
c.add(Calendar.HOUR, 24);
final Date expireDate = c.getTime();
final String[] emptyUserRoles = {};
final String jwtToken =
JWT.create()
.withIssuer(issuer)
.withKeyId("dummy_keyid")
.withAudience(audience)
.withIssuedAt(currentDate)
.withExpiresAt(expireDate)
.withClaim("oid", "dummy_oid")
.withClaim("preferred_username", "dummy_preferred_username")
.withArrayClaim("roles", emptyUserRoles)
.sign(algorithm);
return jwtToken;
}
private String internalGenerateValidJWTToken(String validRole)
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
final Algorithm algorithm =
Algorithm.RSA256((RSAPublicKey) getPublicKey(), (RSAPrivateKey) getPrivateKey());
final Calendar c = Calendar.getInstance();
final Date currentDate = c.getTime();
c.add(Calendar.HOUR, 24);
final Date expireDate = c.getTime();
final String[] validUserRoles = {validRole, "Another-Role"};
final String jwtToken =
JWT.create()
.withIssuer(issuer)
.withKeyId("dummy_keyid")
.withAudience(audience)
.withIssuedAt(currentDate)
.withExpiresAt(expireDate)
.withClaim("oid", "dummy_oid")
.withClaim("preferred_username", "dummy_preferred_username")
.withArrayClaim("roles", validUserRoles)
.sign(algorithm);
return jwtToken;
}
private String generateValidJWTToken()
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
return internalGenerateValidJWTToken("WorkspaceOwner");
}
private String generateValidJWTTokenWithWrongRole()
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
return internalGenerateValidJWTToken("NotTheRightRole");
}
private String generateExpiredJWTToken()
throws IllegalArgumentException, NoSuchAlgorithmException, InvalidKeySpecException {
final Algorithm algorithm =
Algorithm.RSA256((RSAPublicKey) getPublicKey(), (RSAPrivateKey) getPrivateKey());
final Calendar c = Calendar.getInstance();
final Date currentDate = c.getTime();
c.add(Calendar.HOUR, -24);
final Date expireDate = c.getTime();
final String[] validUserRole = {"Project-User"};
final String jwtToken =
JWT.create()
.withIssuer(issuer)
.withKeyId("dummy_keyid")
.withAudience(audience)
.withIssuedAt(currentDate)
.withExpiresAt(expireDate)
.withClaim("oid", "dummy_oid")
.withClaim("preferred_username", "dummy_preferred_username")
.withArrayClaim("roles", validUserRole)
.sign(algorithm);
return jwtToken;
}
@Test
public void validateTokenFailsWhenNoNeededRole() throws Exception {
final String jwtToken = generateValidJWTTokenWithWrongRole();
final PublicKey publicKey = getPublicKey();
final Algorithm algorithm = Algorithm.RSA256((RSAPublicKey) publicKey, null);
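    // Statically mock Algorithm so the provider verifies the token with our test public key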
try (MockedStatic<Algorithm> mockAlgorithm = Mockito.mockStatic(Algorithm.class)) {
mockAlgorithm
.when(() -> Algorithm.RSA256((RSAPublicKey) publicKey, null))
.thenReturn(algorithm);
final Jwk jwk = mock(Jwk.class);
final UrlJwkProvider jwkProvider = mock(UrlJwkProvider.class);
when(jwk.getPublicKey()).thenReturn(publicKey);
when(jwkProvider.get("dummy_keyid")).thenReturn(jwk);
environmentVariables.set("AUDIENCE", audience);
environmentVariables.set("ISSUER", issuer);
final AuthenticationProviderService azureTREAuthenticationProviderService =
new AuthenticationProviderService();
final Method validateToken =
AuthenticationProviderService.class.getDeclaredMethod(
"validateToken", String.class, UrlJwkProvider.class);
validateToken.setAccessible(true);
try {
validateToken.invoke(azureTREAuthenticationProviderService, jwtToken, jwkProvider);
fail("Exception not thrown");
} catch (final InvocationTargetException e) {
assertEquals(GuacamoleInvalidCredentialsException.class, e.getTargetException().getClass());
}
}
}
private void validateTokenSucceedWhenValidRole(String role) throws Exception {
final String jwtToken = internalGenerateValidJWTToken(role);
final PublicKey publicKey = getPublicKey();
final Algorithm algorithm = Algorithm.RSA256((RSAPublicKey) publicKey, null);
try (MockedStatic<Algorithm> mockAlgorithm = Mockito.mockStatic(Algorithm.class)) {
mockAlgorithm
.when(() -> Algorithm.RSA256((RSAPublicKey) publicKey, null))
.thenReturn(algorithm);
final Jwk jwk = mock(Jwk.class);
final UrlJwkProvider jwkProvider = mock(UrlJwkProvider.class);
when(jwk.getPublicKey()).thenReturn(publicKey);
when(jwkProvider.get("dummy_keyid")).thenReturn(jwk);
environmentVariables.set("AUDIENCE", audience);
environmentVariables.set("ISSUER", issuer);
final AuthenticationProviderService azureTREAuthenticationProviderService =
new AuthenticationProviderService();
final Method validateToken =
AuthenticationProviderService.class.getDeclaredMethod(
"validateToken", String.class, UrlJwkProvider.class);
validateToken.setAccessible(true);
validateToken.invoke(azureTREAuthenticationProviderService, jwtToken, jwkProvider);
}
}
@Test
public void validateTokenSucceedWhenResearcherRole() throws Exception {
validateTokenSucceedWhenValidRole("WorkspaceResearcher");
}
@Test
public void validateTokenSucceedWhenOwnerRole() throws Exception {
validateTokenSucceedWhenValidRole("WorkspaceOwner");
}
@Test
public void validateTokenThrowsWhenNoRole() throws Exception {
final String jwtToken = generateNoRolesJWTToken();
final PublicKey publicKey = getPublicKey();
final Algorithm algorithm = Algorithm.RSA256((RSAPublicKey) publicKey, null);
try (MockedStatic<Algorithm> mockAlgorithm = Mockito.mockStatic(Algorithm.class)) {
mockAlgorithm
.when(() -> Algorithm.RSA256((RSAPublicKey) publicKey, null))
.thenReturn(algorithm);
final Jwk jwk = mock(Jwk.class);
final UrlJwkProvider jwkProvider = mock(UrlJwkProvider.class);
when(jwk.getPublicKey()).thenReturn(publicKey);
when(jwkProvider.get("dummy_keyid")).thenReturn(jwk);
environmentVariables.set("AUDIENCE", audience);
environmentVariables.set("ISSUER", issuer);
final AuthenticationProviderService azureTREAuthenticationProviderService =
new AuthenticationProviderService();
final Method validateToken =
AuthenticationProviderService.class.getDeclaredMethod(
"validateToken", String.class, UrlJwkProvider.class);
validateToken.setAccessible(true);
try {
validateToken.invoke(azureTREAuthenticationProviderService, jwtToken, jwkProvider);
fail("Exception not thrown");
} catch (final InvocationTargetException e) {
assertEquals(GuacamoleInvalidCredentialsException.class, e.getTargetException().getClass());
}
}
}
@Test
public void validateTokenThrowsWhenEmptyRole() throws Exception {
final String jwtToken = generateEmptyRolesJWTToken();
final PublicKey publicKey = getPublicKey();
final Algorithm algorithm = Algorithm.RSA256((RSAPublicKey) publicKey, null);
try (MockedStatic<Algorithm> mockAlgorithm = Mockito.mockStatic(Algorithm.class)) {
mockAlgorithm
.when(() -> Algorithm.RSA256((RSAPublicKey) publicKey, null))
.thenReturn(algorithm);
final Jwk jwk = mock(Jwk.class);
final UrlJwkProvider jwkProvider = mock(UrlJwkProvider.class);
when(jwk.getPublicKey()).thenReturn(publicKey);
when(jwkProvider.get("dummy_keyid")).thenReturn(jwk);
environmentVariables.set("AUDIENCE", audience);
environmentVariables.set("ISSUER", issuer);
final AuthenticationProviderService azureTREAuthenticationProviderService =
new AuthenticationProviderService();
final Method validateToken =
AuthenticationProviderService.class.getDeclaredMethod(
"validateToken", String.class, UrlJwkProvider.class);
validateToken.setAccessible(true);
try {
validateToken.invoke(azureTREAuthenticationProviderService, jwtToken, jwkProvider);
fail("Exception not thrown");
} catch (final InvocationTargetException e) {
assertEquals(GuacamoleInvalidCredentialsException.class, e.getTargetException().getClass());
}
}
}
@Test
public void validateTokenThrowsWhenExpiredToken() throws Exception {
final String jwtToken = generateExpiredJWTToken();
final PublicKey publicKey = getPublicKey();
final Algorithm algorithm = Algorithm.RSA256((RSAPublicKey) publicKey, null);
try (MockedStatic<Algorithm> mockAlgorithm = Mockito.mockStatic(Algorithm.class)) {
mockAlgorithm
.when(() -> Algorithm.RSA256((RSAPublicKey) publicKey, null))
.thenReturn(algorithm);
final Jwk jwk = mock(Jwk.class);
final UrlJwkProvider jwkProvider = mock(UrlJwkProvider.class);
when(jwk.getPublicKey()).thenReturn(publicKey);
when(jwkProvider.get("dummy_keyid")).thenReturn(jwk);
environmentVariables.set("AUDIENCE", audience);
environmentVariables.set("ISSUER", issuer);
final AuthenticationProviderService azureTREAuthenticationProviderService =
new AuthenticationProviderService();
final Method validateToken =
AuthenticationProviderService.class.getDeclaredMethod(
"validateToken", String.class, UrlJwkProvider.class);
validateToken.setAccessible(true);
try {
validateToken.invoke(azureTREAuthenticationProviderService, jwtToken, jwkProvider);
fail("Exception not thrown");
} catch (final InvocationTargetException e) {
assertEquals(GuacamoleInvalidCredentialsException.class, e.getTargetException().getClass());
assertThat(
e.getTargetException().getMessage(),
CoreMatchers.containsString("The Token has expired on"));
}
}
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/AuthenticationProviderServiceTest.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/AuthenticationProviderServiceTest.java",
"repo_id": "AzureTRE",
"token_count": 8924
}
| 135 |
variable "workspace_id" {
type = string
description = "The workspace ID"
}
variable "aad_authority_url" {
type = string
description = "The Azure AD authority URL"
}
variable "tre_id" {
type = string
description = "The TRE ID"
}
variable "mgmt_resource_group_name" {
type = string
description = "The management resource group name"
}
variable "mgmt_acr_name" {
type = string
description = "The management ACR name"
}
variable "image_name" {
type = string
description = "The Guacamole image name"
}
variable "image_tag" {
type = string
description = "The Guacamole image tag"
}
variable "guac_disable_copy" {
type = bool
description = "Disable copy from the Guacamole workspace"
}
variable "guac_disable_paste" {
type = bool
description = "Disable paste to the Guacamole workspace"
}
variable "guac_enable_drive" {
type = bool
description = "Enable drive redirection"
}
variable "guac_drive_name" {
type = string
description = "The drive name"
}
variable "guac_drive_path" {
type = string
description = "The drive path"
}
variable "guac_disable_download" {
type = bool
description = "Disable download from the Guacamole workspace"
}
variable "guac_disable_upload" {
type = bool
description = "Disable upload to the Guacamole workspace"
}
variable "is_exposed_externally" {
type = bool
description = "Is the Guacamole workspace to be exposed externally?"
}
variable "tre_resource_id" {
type = string
description = "The workspace service ID"
}
variable "arm_environment" {
type = string
description = "The ARM cloud environment"
}
|
AzureTRE/templates/workspace_services/guacamole/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 617
}
| 136 |
locals {
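  # substr with a negative offset/length takes trailing characters - here the last 4 of each ID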
short_service_id = substr(var.tre_resource_id, -4, -1)
short_workspace_id = substr(var.workspace_id, -4, -1)
short_parent_id = substr(var.parent_service_id, -4, -1)
workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}"
vm_name = "windowsvm${local.short_service_id}"
keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
vm_password_secret_name = "${local.vm_name}-admin-credentials"
tre_user_resources_tags = {
tre_id = var.tre_id
tre_workspace_id = var.workspace_id
tre_workspace_service_id = var.parent_service_id
tre_user_resource_id = var.tre_resource_id
}
# Load VM SKU/image details from porter.yaml
porter_yaml = yamldecode(file("${path.module}/../porter.yaml"))
vm_sizes = local.porter_yaml["custom"]["vm_sizes"]
image_details = local.porter_yaml["custom"]["image_options"]
# Create local variables to support the VM resource
selected_image = local.image_details[var.image]
# selected_image_source_refs is an array to enable easy use of a dynamic block
selected_image_source_refs = lookup(local.selected_image, "source_image_reference", null) == null ? [] : [local.selected_image.source_image_reference]
selected_image_source_id = lookup(local.selected_image, "source_image_name", null) == null ? null : "${var.image_gallery_id}/images/${local.selected_image.source_image_name}"
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 687
}
| 137 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
export TF_LOG=""
# These variables are loaded in for us
# shellcheck disable=SC2154
terraform init -input=false -backend=true -reconfigure \
-backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \
-backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \
-backend-config="container_name=$TF_VAR_terraform_state_container_name" \
-backend-config="key=${TRE_ID}${TF_VAR_workspace_id}${TF_VAR_parent_service_id}guacamolewindowsvm"
terraform plan
terraform apply -auto-approve
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 254
}
| 138 |
---
schemaVersion: 1.0.0
name: tre-service-guacamole-windowsvm
version: 0.7.9
description: "An Azure TRE User Resource Template for Guacamole (Windows 10)"
dockerfile: Dockerfile.tmpl
registry: azuretre
custom:
  # For information on vm_sizes and image_options, see README.md in the guacamole/user-resources folder
vm_sizes:
"2 CPU | 8GB RAM": Standard_D2s_v5
"4 CPU | 16GB RAM": Standard_D4s_v5
"8 CPU | 32GB RAM": Standard_D8s_v5
"16 CPU | 64GB RAM": Standard_D16s_v5
image_options:
"Windows 10":
source_image_reference:
publisher: MicrosoftWindowsDesktop
offer: Windows-10
sku: win10-21h2-pro-g2
version: latest
conda_config: false
"Windows 11":
source_image_reference:
publisher: microsoftwindowsdesktop
offer: windows-11
sku: win11-21h2-pro
version: latest
conda_config: false
"Server 2019 Data Science VM":
source_image_reference:
publisher: microsoft-dsvm
offer: dsvm-win-2019
sku: winserver-2019
version: latest
conda_config: true
  # For information on using custom images, see README.md in the guacamole/user-resources folder
# "Custom Image From Gallery":
# source_image_name: your-image
# conda_config: true
credentials:
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: parent_service_id
type: string
description: "Resource group containing the shared ACR"
env: PARENT_SERVICE_ID
- name: image_gallery_id
type: string
description: Azure resource ID for the compute image gallery to pull images from (if specifying custom images by name)
default: ""
  - name: id
    type: string
    description: "An Id for this installation"
    env: id
  - name: azure_environment
    type: string
    default: "AzureCloud"
    description: "Used by Azure CLI to set the Azure environment"
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: os_image
type: string
default: "Windows 10"
- name: vm_size
type: string
default: "2 CPU | 8GB RAM"
- name: shared_storage_access
type: boolean
default: true
- name: shared_storage_name
type: string
default: "vm-shared-storage"
- name: arm_environment
type: string
outputs:
- name: ip
type: string
applyTo:
- install
- upgrade
- name: hostname
type: string
applyTo:
- install
- upgrade
- name: connection_uri
type: string
applyTo:
- install
- upgrade
- name: azure_resource_id
type: string
applyTo:
- install
- start
- stop
- reset_password
mixins:
- exec
- terraform:
clientVersion: 1.4.6
- az:
clientVersion: 2.37.0
install:
- terraform:
description: "Deploy Guacamole User Resource Service (Windows VM)"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: ip
- name: hostname
- name: connection_uri
- name: azure_resource_id
upgrade:
- terraform:
description: "Update Guacamole User Resource Service (Windows VM)"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: ip
- name: hostname
- name: connection_uri
- name: azure_resource_id
uninstall:
- exec:
description: "Delete the Extensions from the Terraform state manually"
command: ./delete_vm_extensions.sh
arguments:
- ${ bundle.parameters.tfstate_resource_group_name }
- ${ bundle.parameters.tfstate_storage_account_name }
- ${ bundle.parameters.tfstate_container_name }
- ${ bundle.parameters.id }
- terraform:
description: "Delete the Guacamole User Resource Service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
parent_service_id: ${ bundle.parameters.parent_service_id }
tre_resource_id: ${ bundle.parameters.id }
image: ${ bundle.parameters.os_image }
vm_size: ${ bundle.parameters.vm_size }
shared_storage_access: ${ bundle.parameters.shared_storage_access }
shared_storage_name: ${ bundle.parameters.shared_storage_name }
image_gallery_id: ${ bundle.parameters.image_gallery_id }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
start:
- terraform:
arguments:
- "output"
description: "Get resource ID from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- az:
description: "Start the VM"
arguments:
- vm
- start
flags:
ids: ${ bundle.outputs.azure_resource_id }
stop:
- terraform:
arguments:
- "output"
description: "Get VM hostname and rg from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- az:
description: "Stop the VM"
arguments:
- vm
- deallocate
flags:
ids: ${ bundle.outputs.azure_resource_id }
reset_password:
- terraform:
arguments:
- "output"
description: "Get VM details from Terraform outputs"
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.id }
outputs:
- name: azure_resource_id
- name: vm_username
- name: vm_password_secret_name
- name: keyvault_name
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "Login to Azure"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id }
- exec:
description: "Reset password and persist to keyvault"
suppress-output: true
command: ./reset_password.sh
arguments:
- ${ bundle.outputs.vm_password_secret_name }
- ${ bundle.outputs.keyvault_name }
- ${ bundle.outputs.vm_username }
- ${ bundle.outputs.azure_resource_id }
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 4083
}
| 139 |
---
schemaVersion: 1.0.0
name: tre-workspace-service-health
version: 0.2.5
description: "An Azure Data Health Services workspace service"
registry: azuretre
dockerfile: Dockerfile.tmpl
credentials:
# Credentials for interacting with the AAD Auth tenant
- name: auth_client_id
env: AUTH_CLIENT_ID
- name: auth_client_secret
env: AUTH_CLIENT_SECRET
- name: auth_tenant_id
env: AUTH_TENANT_ID
# Credentials for interacting with Azure
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: aad_authority_url
type: string
default: "https://login.microsoftonline.com"
# the following are added automatically by the resource processor
- name: id
type: string
description: "Resource ID"
env: id
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
type: string
default: "public"
- name: deploy_fhir
type: boolean
default: false
- name: deploy_dicom
type: boolean
default: false
- name: fhir_kind
type: string
default: ""
outputs:
- name: fhir_url
type: string
applyTo:
- install
- upgrade
- name: dicom_url
type: string
applyTo:
- install
- upgrade
- name: workspace_address_space
type: string
applyTo:
- install
- upgrade
mixins:
- terraform:
clientVersion: 1.3.6
- az:
clientVersion: 2.37.0
install:
- terraform:
description: "Deploy Azure Health workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
deploy_fhir: ${ bundle.parameters.deploy_fhir }
deploy_dicom: ${ bundle.parameters.deploy_dicom }
fhir_kind: ${ bundle.parameters.fhir_kind }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
outputs:
- name: fhir_url
- name: dicom_url
- name: workspace_address_space
upgrade:
- terraform:
description: "Upgrade Azure Health workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
deploy_fhir: ${ bundle.parameters.deploy_fhir }
deploy_dicom: ${ bundle.parameters.deploy_dicom }
fhir_kind: ${ bundle.parameters.fhir_kind }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
outputs:
- name: fhir_url
- name: dicom_url
- name: workspace_address_space
uninstall:
- terraform:
description: "Tear down AzurAzure Health Data Services workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
deploy_fhir: ${ bundle.parameters.deploy_fhir }
deploy_dicom: ${ bundle.parameters.deploy_dicom }
fhir_kind: ${ bundle.parameters.fhir_kind }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.name }-${ bundle.parameters.id }
|
AzureTRE/templates/workspace_services/health-services/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/health-services/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 2234
}
| 140 |
{
"schemaVersion": "1.0.1",
"name": "service-azureml",
"created": "2021-06-03T11:54:54.0225968Z",
"modified": "2021-06-03T11:54:54.0225968Z",
"parameters": [
{
"name": "id",
"source": {
"env": "ID"
}
},
{
"name": "workspace_id",
"source": {
"env": "WORKSPACE_ID"
}
},
{
"name": "tre_id",
"source": {
"env": "TRE_ID"
}
},
{
"name": "tfstate_container_name",
"source": {
"env": "TERRAFORM_STATE_CONTAINER_NAME"
}
},
{
"name": "tfstate_resource_group_name",
"source": {
"env": "MGMT_RESOURCE_GROUP_NAME"
}
},
{
"name": "tfstate_storage_account_name",
"source": {
"env": "MGMT_STORAGE_ACCOUNT_NAME"
}
},
{
"name": "arm_use_msi",
"source": {
"env": "ARM_USE_MSI"
}
}
]
}
|
AzureTRE/templates/workspace_services/innereye/parameters_service_azureml.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/parameters_service_azureml.json",
"repo_id": "AzureTRE",
"token_count": 528
}
| 141 |
FROM python:3.8-bullseye
# Install MLflow Python Packages
RUN pip install --no-cache-dir psycopg2==2.9.5 mlflow==2.0.1 azure-storage-blob==12.14.1
RUN apt-get update \
&& apt-get install openssh-server -y --no-install-recommends \
&& apt-get clean -y && rm -rf /var/lib/apt/lists/*
# define default server env variables
ENV MLFLOW_SERVER_HOST=0.0.0.0
ENV MLFLOW_SERVER_PORT=5000
ENV MLFLOW_SERVER_WORKERS=1
COPY ./docker/sshd_config /etc/ssh/
COPY ./docker/startup.sh /usr/local/bin/
EXPOSE 5000 2222
ENTRYPOINT ["sh", "/usr/local/bin/startup.sh"]
|
AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/Dockerfile/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/Dockerfile",
"repo_id": "AzureTRE",
"token_count": 230
}
| 142 |
output "connection_uri" {
value = "https://${azurerm_linux_web_app.mlflow.default_hostname}"
}
output "is_exposed_externally" {
value = false
}
|
AzureTRE/templates/workspace_services/mlflow/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 57
}
| 143 |
variable "workspace_id" {
type = string
}
variable "tre_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "sql_sku" {
type = string
}
variable "db_name" {
type = string
}
variable "storage_mb" {
type = number
validation {
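    # Allowed range: 5,120 MB (5 GiB) to 1,048,576 MB (1 TiB)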
condition = var.storage_mb > 5119 && var.storage_mb < 1048577
error_message = "The storage value is out of range."
}
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/workspace_services/mysql/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mysql/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 161
}
| 144 |
/****** Results and temp Schema creation ******/
CREATE SCHEMA [$(RESULTS_SCHEMA_NAME)]
GO
CREATE SCHEMA [$(TEMP_SCHEMA_NAME)]
GO
/****** Copy Data ******/
CREATE TABLE #tbl
WITH
( DISTRIBUTION = ROUND_ROBIN
)
AS
SELECT ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) AS Sequence
, [name]
, 'CREATE TABLE ' + N'$(RESULTS_SCHEMA_NAME)' + '.' + t.name + ' WITH (DISTRIBUTION = ' + d.distribution_policy_desc + ', CLUSTERED COLUMNSTORE INDEX) AS SELECT * FROM ' + N'$(ORIGIN_RESULTS_SCHEMA_NAME)' + '.' + t.name AS sql_code
FROM sys.tables AS t left join sys.pdw_table_distribution_properties AS d ON (t.object_id = d.object_id)
WHERE t.schema_id = (select schema_id from sys.schemas where name = N'$(ORIGIN_RESULTS_SCHEMA_NAME)')
;
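/****** Execute each generated CTAS statement in turn ******/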
DECLARE @nbr_statements INT = (SELECT COUNT(*) FROM #tbl)
, @i INT = 1
;
WHILE @i <= @nbr_statements
BEGIN
DECLARE @sql_code NVARCHAR(4000) = (SELECT sql_code FROM #tbl WHERE Sequence = @i);
EXEC sp_executesql @sql_code;
SET @i +=1;
END
DROP TABLE #tbl;
|
AzureTRE/templates/workspace_services/ohdsi/sql/init_synapse_schemas.sql/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/sql/init_synapse_schemas.sql",
"repo_id": "AzureTRE",
"token_count": 432
}
| 145 |
variable "workspace_id" {
type = string
}
variable "tre_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "arm_environment" {
type = string
}
variable "address_space" {
type = string
description = "Address space for PostgreSQL's subnet"
}
# ATLAS Database
variable "postgres_sku" {
type = string
default = "B_Standard_B1ms"
description = "The SKU of the PostgreSQL database"
}
variable "postgres_storage_size_in_mb" {
type = number
default = 32768
description = "The storage size of the PostgreSQL database in MB"
}
# Data Source Configuration
variable "configure_data_source" {
type = bool
}
variable "data_source_config" {
type = string
default = null
}
variable "data_source_daimons" {
type = string
default = null
}
|
AzureTRE/templates/workspace_services/ohdsi/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 294
}
| 146 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/azure/azapi" {
version = "1.5.0"
constraints = ">= 1.3.0, 1.5.0"
hashes = [
"h1:wiX+oIlwttTKq6z3LGbrqmRCkN66C0YgqxM+jKQbejw=",
"zh:14109c169973e1b0d526131ca047e25f62700a44ecce4303aacb15793764be3e",
"zh:19417f2bbbadd0a079d51646a929d43ae7a0293f0fc13c3fa369d32780c1c846",
"zh:3254370d3304227ea0ec1352d98aa4a4a59e6a76ddede26454bdb55048101ec3",
"zh:34d9a96e6401f4fc087100b9c63aa47c77904a45478155671787854db13872c1",
"zh:62f615527a6bda5b9808baf75edf80648b106ba449418665ea4841ded978aee7",
"zh:6d3fc50788e74fba509586d99c4b80a1ef96345f21a0033746dfbf69dc6c2c1d",
"zh:7858f0e8b63590c34defd0ef7e844beaed942f8e2ae6df5a591d031d1db077a4",
"zh:80563967234c853f18cffffa821df7a5dd43c0a72c02e499111dcda0064223d7",
"zh:ae691de55bd1fd18820a5bf1b6bf8204711e8ddd01adde70a2db4e585523fb42",
"zh:b5700bab09282c0c05d76ca7910d43158e065d854e0780348fa8a5de06fba44f",
"zh:c378578d65b4a51e2aa57122c8149e136bad72e5c8de516d269e6259051f9428",
"zh:c5093f71eb18e84f93947d24c369e67a7dc4fa02950b9ae6b09cb71bc62a8b40",
]
}
provider "registry.terraform.io/hashicorp/azuread" {
version = "2.20.0"
constraints = ">= 2.20.0, 2.20.0"
hashes = [
"h1:qKo6WfRyml6w4qcnqDoeTmlWCL/kzng4qOB/5/XAW9g=",
"zh:0262b33661825b54edc0c539415ebdc942ecb3e2cf90af75f7ef134a1f901816",
"zh:0b569b6427e0a1f6c38ad19dd50f036bf65d5b64751e8a083fb36df76337faba",
"zh:1c3e89cf19118fc07d7b04257251fc9897e722c16e0a0df7b07fcd261f8c12e7",
"zh:4f3d017077eb9264ad4047ea0eda87ae7bc76da119f98361d10df27654b5b01c",
"zh:5566a523690f75f5fd4577f24a3194c719ebd22c011bf8619b86594a352afc71",
"zh:6101be64bf464d763585d144ee2cafae4aad74eb2f7f5264340addc9a9f227f7",
"zh:632627f20e48ce7e47f3be86a4d5869eb8412bf8083b5770decbb1e3cc335a1c",
"zh:63e7fbf0a34d7be50a4b83853600be6116a7c1600484d2e7ff2f15cc98abcf6f",
"zh:7909a7a074440e50be426f57e616e920745f8c38288537220f37c2d1ec719452",
"zh:e4f20c9887062a9ae1edcd208112d4d90c12afb7577f943220b54b83de8f10b7",
"zh:eb76ecf86977cd310f3311bc8f0015763c0a91594172a6b2d4ddb3d981d9c28e",
"zh:ffe05338f3e98fcbc5ffcf8b19dab8463849558d2ee6284afc91cdf9636c3330",
]
}
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.73.0"
constraints = ">= 3.8.0, >= 3.33.0, 3.73.0"
hashes = [
"h1:+Z5ZcAQO4e6aWh1N7zX4JqyV/xnDkTRYoCa8pEtNR20=",
"zh:068dfe743c9486d09eeaf97eb7817870b932ecb8132b92b8e0d96eadcf51b349",
"zh:2a16b0d50feb80919880d32cc12d636c37918bbc9133d7b3ff0d610bac1bee86",
"zh:2a77e0deabd3d0f83974125cedca7871add825bf4470688f117a35b6964916cf",
"zh:3ade6f3b9483746f168e7daf5223fd65d5d26313616bca37d9117d5b4fba2b66",
"zh:44554a1fc5f69a1069bbac3fbe1122794943692f81fc2aabda435740f5e10a67",
"zh:69d41ad1073f274548bca763a1ed14813388e5b9b702c15fdc78f2b22b082a09",
"zh:8cf5ce91432fc5ed1b9906bca14ab6f0d3b18e78a9f25e00b1de632ae7669645",
"zh:b70c294e7d55c3404c40ae18e54113e625ee975e80e3f7d558f3fedde89b038e",
"zh:cadab8bc17685a239f45438c555fba156baa709803da55f59cce8c7f1cb70fc1",
"zh:cb74e02e1495df938d464e233a41aa5ffab9f0fd79079016d0a630955ce92b6d",
"zh:cd7a68c03005116fe40542d312d0236ab5bfdd20a2bb6bdf6398d64945c25ef8",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.3.2"
constraints = "~> 3.3.0"
hashes = [
"h1:H5V+7iXol/EHB2+BUMzGlpIiCOdV74H8YjzCxnSAWcg=",
"zh:038293aebfede983e45ee55c328e3fde82ae2e5719c9bd233c324cfacc437f9c",
"zh:07eaeab03a723d83ac1cc218f3a59fceb7bbf301b38e89a26807d1c93c81cef8",
"zh:427611a4ce9d856b1c73bea986d841a969e4c2799c8ac7c18798d0cc42b78d32",
"zh:49718d2da653c06a70ba81fd055e2b99dfd52dcb86820a6aeea620df22cd3b30",
"zh:5574828d90b19ab762604c6306337e6cd430e65868e13ef6ddb4e25ddb9ad4c0",
"zh:7222e16f7833199dabf1bc5401c56d708ec052b2a5870988bc89ff85b68a5388",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b1b2d7d934784d2aee98b0f8f07a8ccfc0410de63493ae2bf2222c165becf938",
"zh:b8f85b6a20bd264fcd0814866f415f0a368d1123cd7879c8ebbf905d370babc8",
"zh:c3813133acc02bbebddf046d9942e8ba5c35fc99191e3eb057957dafc2929912",
"zh:e7a41dbc919d1de800689a81c240c27eec6b9395564630764ebb323ea82ac8a9",
"zh:ee6d23208449a8eaa6c4f203e33f5176fa795b4b9ecf32903dffe6e2574732c2",
]
}
|
AzureTRE/templates/workspaces/base/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 2699
}
| 147 |
terraform {
# In modules we should only specify the min version
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">= 3.8.0"
}
azapi = {
source = "Azure/azapi"
version = ">= 1.3.0"
}
}
}
|
AzureTRE/templates/workspaces/base/terraform/azure-monitor/providers.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/azure-monitor/providers.tf",
"repo_id": "AzureTRE",
"token_count": 124
}
| 148 |
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.73.0"
}
azuread = {
source = "hashicorp/azuread"
version = "=2.20.0"
}
azapi = {
source = "Azure/azapi"
version = "=1.5.0"
}
}
backend "azurerm" {}
}
provider "azurerm" {
features {
key_vault {
# Don't purge on destroy (this would fail due to purge protection being enabled on keyvault)
purge_soft_delete_on_destroy = false
purge_soft_deleted_secrets_on_destroy = false
purge_soft_deleted_certificates_on_destroy = false
purge_soft_deleted_keys_on_destroy = false
# When recreating an environment, recover any previously soft deleted secrets - set to true by default
recover_soft_deleted_key_vaults = true
recover_soft_deleted_secrets = true
recover_soft_deleted_certificates = true
recover_soft_deleted_keys = true
}
}
}
provider "azuread" {
client_id = var.auth_client_id
client_secret = var.auth_client_secret
tenant_id = var.auth_tenant_id
}
provider "azapi" {
}
|
AzureTRE/templates/workspaces/base/terraform/providers.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/providers.tf",
"repo_id": "AzureTRE",
"token_count": 515
}
| 149 |
# Getting Started with Create React App and Fluent UI
This is a [Create React App](https://github.com/facebook/create-react-app) based repo that comes with Fluent UI pre-installed!
## Available Scripts
In the project directory, you can run:
### `yarn start`
Runs the app in the development mode.<br>
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
The page will reload if you make edits.<br>
You will also see any lint errors in the console.
### `yarn test`
Launches the test runner in the interactive watch mode.<br>
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
### `yarn build`
Builds the app for production to the `build` folder.<br>
It correctly bundles React in production mode and optimizes the build for the best performance.
The build is minified and the filenames include the hashes.<br>
Your app is ready to be deployed!
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
### `yarn eject`
**Note: this is a one-way operation. Once you `eject`, you can’t go back!**
If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
Instead, it will copy all the configuration files and the transitive dependencies (Webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
## Learn More
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
To learn React, check out the [React documentation](https://reactjs.org/).
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit [CLA](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
|
AzureTRE/ui/app/README.md/0
|
{
"file_path": "AzureTRE/ui/app/README.md",
"repo_id": "AzureTRE",
"token_count": 793
}
| 150 |
import { Dialog, DialogFooter, PrimaryButton, DefaultButton, DialogType, Spinner } from '@fluentui/react';
import React, { useContext, useState } from 'react';
import { Resource } from '../../models/resource';
import { HttpMethod, ResultType, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { ResourceType } from '../../models/resourceType';
import { APIError } from '../../models/exceptions';
import { LoadingState } from '../../models/loadingState';
import { ExceptionLayout } from './ExceptionLayout';
import { useAppDispatch } from '../../hooks/customReduxHooks';
import { addUpdateOperation } from '../shared/notifications/operationsSlice';
interface ConfirmDeleteProps {
resource: Resource,
onDismiss: () => void
}
// show an 'are you sure' modal, and then send a delete if the user confirms
export const ConfirmDeleteResource: React.FunctionComponent<ConfirmDeleteProps> = (props: ConfirmDeleteProps) => {
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
const [loading, setLoading] = useState(LoadingState.Ok);
const workspaceCtx = useContext(WorkspaceContext);
const dispatch = useAppDispatch();
const deleteProps = {
type: DialogType.normal,
title: 'Delete Resource?',
closeButtonAriaLabel: 'Close',
subText: `Are you sure you want to permanently delete ${props.resource.properties.display_name}?`,
};
const dialogStyles = { main: { maxWidth: 450 } };
const modalProps = {
titleAriaId: 'labelId',
subtitleAriaId: 'subTextId',
isBlocking: true,
styles: dialogStyles
};
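  // workspace services and user resources must be authorised against the workspace application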
const wsAuth = (props.resource.resourceType === ResourceType.WorkspaceService || props.resource.resourceType === ResourceType.UserResource);
const deleteCall = async () => {
setLoading(LoadingState.Loading);
try {
let op = await apiCall(props.resource.resourcePath, HttpMethod.Delete, wsAuth ? workspaceCtx.workspaceApplicationIdURI : undefined, undefined, ResultType.JSON);
dispatch(addUpdateOperation(op.operation));
props.onDismiss();
} catch (err: any) {
err.userMessage = 'Failed to delete resource';
setApiError(err);
setLoading(LoadingState.Error);
}
}
return (<>
<Dialog
hidden={false}
onDismiss={() => props.onDismiss()}
dialogContentProps={deleteProps}
modalProps={modalProps}
>
{
loading === LoadingState.Ok &&
<DialogFooter>
<PrimaryButton text="Delete" onClick={() => deleteCall()} />
<DefaultButton text="Cancel" onClick={() => props.onDismiss()} />
</DialogFooter>
}
{
loading === LoadingState.Loading &&
<Spinner label="Sending request..." ariaLive="assertive" labelPosition="right" />
}
{
loading === LoadingState.Error &&
<ExceptionLayout e={apiError} />
}
</Dialog>
</>);
};
|
AzureTRE/ui/app/src/components/shared/ConfirmDeleteResource.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ConfirmDeleteResource.tsx",
"repo_id": "AzureTRE",
"token_count": 1031
}
| 151 |
import { DefaultPalette, IStackItemStyles, Stack } from "@fluentui/react";
interface ResourceOperationListItemProps {
  header: string,
  val: string
}
export const ResourceOperationListItem: React.FunctionComponent<ResourceOperationListItemProps> = (props: ResourceOperationListItemProps) => {
const stackItemStyles: IStackItemStyles = {
root: {
padding: '5px 0',
color: DefaultPalette.neutralSecondary
}
}
return(
<>
<Stack wrap horizontal>
<Stack.Item styles={stackItemStyles} style={{width:'20%'}}>
{props.header}
</Stack.Item>
<Stack.Item styles={stackItemStyles} style={{width:'80%'}}>
: {props.val}
</Stack.Item>
</Stack>
</>
);
}
|
AzureTRE/ui/app/src/components/shared/ResourceOperationListItem.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceOperationListItem.tsx",
"repo_id": "AzureTRE",
"token_count": 401
}
| 152 |
import { Spinner, SpinnerSize } from "@fluentui/react";
import { useEffect, useState } from "react";
import { LoadingState } from "../../../models/loadingState";
import { HttpMethod, ResultType, useAuthApiCall } from "../../../hooks/useAuthApiCall";
import Form from "@rjsf/fluent-ui";
import { Operation } from "../../../models/operation";
import { Resource } from "../../../models/resource";
import { ResourceType } from "../../../models/resourceType";
import { APIError } from "../../../models/exceptions";
import { ExceptionLayout } from "../ExceptionLayout";
import { ResourceTemplate, sanitiseTemplateForRJSF } from "../../../models/resourceTemplate";
interface ResourceFormProps {
templateName: string,
templatePath: string,
resourcePath: string,
updateResource?: Resource,
onCreateResource: (operation: Operation) => void,
workspaceApplicationIdURI?: string
}
export const ResourceForm: React.FunctionComponent<ResourceFormProps> = (props: ResourceFormProps) => {
const [template, setTemplate] = useState<any | null>(null);
const [formData, setFormData] = useState({});
const [loading, setLoading] = useState(LoadingState.Loading as LoadingState);
const [sendingData, setSendingData] = useState(false);
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
useEffect(() => {
const getFullTemplate = async () => {
try {
// Get the full resource template containing the required parameters
const templateResponse = (await apiCall(props.updateResource ? `${props.templatePath}?is_update=true&version=${props.updateResource.templateVersion}` : props.templatePath, HttpMethod.Get)) as ResourceTemplate;
// if it's an update, populate the form with the props that are available in the template
if (props.updateResource) {
setFormData(props.updateResource.properties);
}
const sanitisedTemplate = sanitiseTemplateForRJSF(templateResponse);
setTemplate(sanitisedTemplate);
setLoading(LoadingState.Ok);
} catch (err: any) {
err.userMessage = "Error retrieving resource template";
setApiError(err);
setLoading(LoadingState.Error);
}
};
// Fetch full resource template only if not in state
if (!template) {
getFullTemplate();
}
}, [apiCall, props.templatePath, template, props.updateResource]);
const removeReadOnlyProps = (data: any, template: ResourceTemplate): any => {
    // flatten all the nested properties from across the template into one flat map we can iterate easily
let allProps = {} as any;
const recurseTemplate = (templateFragment: any) => {
Object.keys(templateFragment).forEach((key) => {
if (key === "properties") {
Object.keys(templateFragment[key]).forEach((prop) => {
allProps[prop] = templateFragment[key][prop];
});
}
if (typeof (templateFragment[key]) === "object" && key !== "if") {
recurseTemplate(templateFragment[key]);
}
})
}
recurseTemplate(template);
// iterate the data payload
for (let prop in data) {
// if the prop isn't in the template, or it's readOnly, delete it
if (!allProps[prop] || allProps[prop].readOnly === true) {
delete data[prop];
}
}
return data;
}
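  // e.g. with a template where `id` is readOnly, a payload of
  // { display_name: "ws1", id: "123" } is trimmed to { display_name: "ws1" } (illustrative).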
const createUpdateResource = async (formData: any) => {
const data = removeReadOnlyProps(formData, template);
console.log("parsed payload to send", data);
setSendingData(true);
let response;
try {
if (props.updateResource) {
const wsAuth =
props.updateResource.resourceType === ResourceType.WorkspaceService
|| props.updateResource.resourceType === ResourceType.UserResource;
response = await apiCall(
props.updateResource.resourcePath,
HttpMethod.Patch,
wsAuth ? props.workspaceApplicationIdURI : undefined,
{ properties: data },
ResultType.JSON,
undefined, undefined, props.updateResource._etag);
} else {
const resource = { templateName: props.templateName, properties: data };
response = await apiCall(
props.resourcePath,
HttpMethod.Post,
props.workspaceApplicationIdURI,
resource,
ResultType.JSON);
}
setSendingData(false);
props.onCreateResource(response.operation);
} catch (err: any) {
err.userMessage = 'Error sending create / update request';
setApiError(err);
setLoading(LoadingState.Error);
setSendingData(false);
}
}
// use the supplied uiSchema or create a blank one, and set the overview field to textarea manually.
const uiSchema = (template && template.uiSchema) || {};
uiSchema.overview = {
"ui:widget": "textarea"
}
// if no specific order has been set, set a generic one with the primary fields at the top
if (!uiSchema["ui:order"] || uiSchema["ui:order"].length === 0) {
uiSchema["ui:order"] = [
"display_name",
"description",
"overview",
"*"
]
}
switch (loading) {
case LoadingState.Ok:
return (
template &&
<div style={{ marginTop: 20 }}>
{
sendingData ?
<Spinner label="Sending request" ariaLive="assertive" labelPosition="bottom" size={SpinnerSize.large} />
:
<Form omitExtraData={true} schema={template} formData={formData} uiSchema={uiSchema} onSubmit={(e: any) => createUpdateResource(e.formData)} />
}
</div>
)
case LoadingState.Error:
return (
<ExceptionLayout e={apiError} />
);
default:
return (
<div style={{ marginTop: 20 }}>
<Spinner label="Loading template" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
)
}
}
|
AzureTRE/ui/app/src/components/shared/create-update-resource/ResourceForm.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/create-update-resource/ResourceForm.tsx",
"repo_id": "AzureTRE",
"token_count": 2209
}
| 153 |
{
"rootClientId": "",
"rootTenantId": "",
"treApplicationId": "api://",
"treUrl": "https://my-tre.northeurope.cloudapp.azure.com/api",
"pollingDelayMilliseconds": 10000,
"treId": "my-tre",
"debug": false,
"version": "0.0.0",
"activeDirectoryUri": ""
}
|
AzureTRE/ui/app/src/config.source.json/0
|
{
"file_path": "AzureTRE/ui/app/src/config.source.json",
"repo_id": "AzureTRE",
"token_count": 133
}
| 154 |
import { ResourceType } from "./resourceType";
export interface ResourceTemplate {
id: string,
name: string,
type: string,
description: string,
version: string,
title: string,
resourceType: ResourceType,
current: boolean,
properties: any,
allOf?: Array<any>,
system_properties: any,
actions: Array<TemplateAction>,
customActions: Array<TemplateAction>,
required: Array<string>,
uiSchema: any,
pipeline: any
}
export const sanitiseTemplateForRJSF = (template: ResourceTemplate) => {
if (template.properties) {
Object.keys(template.properties).forEach((key: string) => {
Object.keys(template.properties[key]).forEach((name: string) => {
if (template.properties[key][name] === null) {
delete template.properties[key][name]
}
});
});
}
const sanitised = {
name: template.name,
type: template.type,
description: template.description,
title: template.title,
properties: template.properties,
allOf: template.allOf,
required: template.required,
uiSchema: template.uiSchema
}
if (!sanitised.allOf) delete sanitised.allOf;
return sanitised;
};
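// e.g. a property entry { "type": "string", "default": null } has its
// null-valued keys dropped so RJSF does not trip over them (illustrative).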
export interface TemplateAction {
name: string,
description: string
}
// make a sensible guess at an icon
export const getActionIcon = (actionName: string) => {
switch(actionName.toLowerCase()){
case 'start':
return 'Play';
case 'stop':
return 'Stop';
default:
return 'Asterisk'
}
};
|
AzureTRE/ui/app/src/models/resourceTemplate.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/resourceTemplate.ts",
"repo_id": "AzureTRE",
"token_count": 536
}
| 155 |
# TODO: The maintainer of this repo has not yet edited this file
**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
- **No CSS support:** Fill out this template with information about how to file issues and get help.
- **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps.
- **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide.
*Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
# Support
## How to file issues and get help
This project uses GitHub Issues to track bugs and feature requests. Please search the existing
issues before filing new issues to avoid duplicates. For new issues, file your bug or
feature request as a new Issue.
For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
## Microsoft Support Policy
Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
|
BioGPT/SUPPORT.md/0
|
{
"file_path": "BioGPT/SUPPORT.md",
"repo_id": "BioGPT",
"token_count": 317
}
| 156 |
#!/usr/bin/env bash
FORMAT=$1          # annotation format expected by the evaluator (e.g. PubTator)
GOLD_FILE=$2       # gold-standard annotations
PREDICTION_FILE=$3 # system predictions
java -cp bc5cdr_eval.jar ncbi.bc5cdr_eval.Evaluate relation CID $FORMAT $GOLD_FILE $PREDICTION_FILE | grep -v INFO
# java -cp bc5cdr_eval.jar ncbi.bc5cdr_eval.Evaluate relation CID $FORMAT $GOLD_FILE $PREDICTION_FILE
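# Usage sketch (hypothetical filenames):
#   bash eval_relation.sh PubTator CDR_gold.txt CDR_predictions.txt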
|
BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/eval_relation.sh/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/eval_relation.sh",
"repo_id": "BioGPT",
"token_count": 115
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import json
from sklearn.metrics import accuracy_score
pred_file = sys.argv[1]
gold_file = sys.argv[2]
def do_eval(preds, golden):
print(accuracy_score(golden, preds))
return
def main():
preds = []
with open(pred_file) as reader:
for line in reader:
preds.append(line.strip())
golden = []
if gold_file.endswith('.tsv'):
with open(gold_file) as reader:
for line in reader:
                line = line.strip()
                if line:
                    golden.append(line.split('\t')[-1])
elif gold_file.endswith('.json'):
with open(gold_file) as reader:
data = json.load(reader)
golden = [label for pmid, label in data.items()]
assert len(preds) == len(golden), f"{len(preds)} {len(golden)}"
print("\n====File: ", os.path.basename(pred_file))
do_eval(preds, golden)
if __name__ == "__main__":
main()
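# Usage sketch (hypothetical files):
#   python hard_match_evaluation.py predictions.txt golden.tsv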
|
BioGPT/examples/QA-PubMedQA/hard_match_evaluation.py/0
|
{
"file_path": "BioGPT/examples/QA-PubMedQA/hard_match_evaluation.py",
"repo_id": "BioGPT",
"token_count": 473
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import re
import json
out_file = sys.argv[1]
prefix = [
'(learned[0-9]+ )+',
'we can conclude that',
'we have that',
'in conclusion,',
]
def strip_prefix(line):
for p in prefix:
res = re.search(p, line)
if res is not None:
line = re.split(p, line)[-1].strip()
break
return line
def split_sentence(line):
sentences = re.split(r"; ", line)
return sentences
def convert_relis_sentence(sentence):
ans = None
segs = re.match(r"the interaction between (.*) and (.*) is (.*)", sentence)
if segs is not None:
segs = segs.groups()
ans = (segs[0].strip(), segs[2].strip(), segs[1].strip())
return ans
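# e.g. "the interaction between aspirin and warfarin is mechanism"
#   -> ("aspirin", "mechanism", "warfarin")   (hypothetical DDI-style sentence)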
def converter(sample, h_idx=0, r_idx=1, t_idx=2):
ret = {"triple_list_gold": [], "triple_list_pred": [], "new": [], "lack": [], "id": [0]}
for s in sample:
ret["triple_list_pred"].append({"subject": s[h_idx], "relation": s[r_idx], "object": s[t_idx]})
return ret
all_lines = []
with open(out_file, "r", encoding="utf8") as fr:
for line in fr:
e = line.strip()
if len(e) > 0 and e[-1] == ".":
all_lines.append(e[:-1])
else:
all_lines.append(e)
hypothesis = []
cnt = 0
fail_cnt = 0
for i, line in enumerate(all_lines):
cnt += 1
ret = []
strip_line = strip_prefix(line)
sentences = split_sentence(strip_line)
for sen in sentences:
ans = convert_relis_sentence(sen)
if ans is not None:
ret.append(ans)
if len(ret) > 0:
hypothesis.append(ret)
else:
hypothesis.append([("failed", "failed", "failed")])
fail_cnt += 1
print("Failed:id:{}, line:{}".format(i+1, line))
ret_formatted = []
for i in range(len(hypothesis)):
ret_formatted.append(converter(hypothesis[i]))
with open(f"{out_file}.extracted.json", "w", encoding="utf8") as fw:
for eg in ret_formatted:
print(json.dumps(eg), file=fw)
print(f"failed = {fail_cnt}, total = {cnt}")
|
BioGPT/examples/RE-DDI/postprocess.py/0
|
{
"file_path": "BioGPT/examples/RE-DDI/postprocess.py",
"repo_id": "BioGPT",
"token_count": 962
}
| 159 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .language_modeling_prompt import *
from .transformer_lm_prompt import *
from .language_model_prompt_dataset import *
from .constrained_generator import *
|
BioGPT/src/__init__.py/0
|
{
"file_path": "BioGPT/src/__init__.py",
"repo_id": "BioGPT",
"token_count": 68
}
| 160 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.base.roller.arch import CUDA
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
from bitblas.gpu import Matmul
from bitblas.utils import auto_detect_nvidia_target
from bitblas.base.utils import apply_and_build
from bitblas.ops.impl.matmul_impl import (
matmul_nn,
matmul_nt,
matmul_nt_propagate_a_propagate_b,
)
import time
# fmt:off
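# input_args is assumed to follow (M, N, K, in_dtype, out_dtype) with an
# optional trailing accum_dtype, matching the matmul_impl constructors imported above.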
test_shapes = [
# (prim_func, input_args, default_bitblas_schedule),
(matmul_nt, (1024, 1024, 1024, "float16", "float16"), Matmul),
(matmul_nt, (16, 8192, 8192, "float16", "float16"), Matmul),
(matmul_nt, (32, 8192, 8192, "float16", "float16"), Matmul),
(matmul_nt, (16384, 16384, 16384, "float16", "float16"), Matmul),
(matmul_nt, (16384, 16384, 16384, "int8", "int32"), Matmul),
(matmul_nn, (1024, 1024, 1024, "float16", "float16"), Matmul),
(matmul_nn, (8192, 8192, 8192, "float16", "float16"), Matmul),
(matmul_nn, (16384, 16384, 16384, "float16", "float16"), Matmul),
(matmul_nt, (1024, 1024, 1024, "float32", "float32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (16384, 16384, 16384, "float16", "float16", "float16"),
Matmul),
]
llm_shapes = [
# square test
(matmul_nt_propagate_a_propagate_b, (16384, 16384, 16384, "float16", "float16"), Matmul),
# BLOOM-176B
(matmul_nt_propagate_a_propagate_b, (8192, 43008, 14336, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 14336, 14336, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 57344, 14336, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 14336, 57344, "float16", "float16"), Matmul),
# # OPT-65B
(matmul_nt_propagate_a_propagate_b, (8192, 9216, 9216, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 36864, 9216, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 9216, 36864, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 22016, 8192, "float16", "float16"), Matmul),
# # LLAMA-70B/65B
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 22016, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 8192, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 28672, 8192, "float16", "float16"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 28672, "float16", "float16"), Matmul),
# square test
(matmul_nt_propagate_a_propagate_b, (16384, 16384, 16384, "int8", "int8", "int32"), Matmul),
# BLOOM-176B
(matmul_nt_propagate_a_propagate_b, (8192, 43008, 14336, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 14336, 14336, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 57344, 14336, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 14336, 57344, "int8", "int8", "int32"), Matmul),
# OPT-65B
(matmul_nt_propagate_a_propagate_b, (8192, 9216, 9216, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 36864, 9216, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 9216, 36864, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 22016, 8192, "int8", "int8", "int32"), Matmul),
# LLAMA-70B/65B
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 22016, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 8192, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 28672, 8192, "int8", "int8", "int32"), Matmul),
(matmul_nt_propagate_a_propagate_b, (8192, 8192, 28672, "int8", "int8", "int32"), Matmul),
]
benchmark_sets = []
benchmark_sets.extend(llm_shapes)
# fmt:on
target = tvm.target.Target(auto_detect_nvidia_target())
benchmark_results = {}
for get_prim_func, input_args, d_schedule in benchmark_sets:
ir_module = get_prim_func(*input_args)
func = ir_module["main"]
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(20)
tune_start = time.time()
cpresults, best = apply_and_build(func, configs, arch, parallel_build=False)
fast_tune_time = time.time() - tune_start
print("[BitBLAS] The best latency of top 1 is {:.3f} ms".format(cpresults[0].latency * 1e3))
print("[BitBLAS] The best latency of top 20 is {:.3f} ms".format(best.latency * 1e3))
# evaluate the performance of the default schedule
rule = d_schedule()
default_tune_start = time.time()
sch_default = rule.apply(func, target, False)
with tvm.transform.PassContext(config={"tir.use_async_copy": True}):
mod_default = tvm.build(sch_default.mod["main"], target="cuda")
default_tune_time = time.time() - default_tune_start
args = func.buffer_map.values()
profile_tensors = best.profile_tensors
timer_cuda_mod = mod_default.time_evaluator(mod_default.entry_name, arch.device, number=5)
t = timer_cuda_mod(*profile_tensors).mean
print("Time cost of BitBLAS default schedule: {:.3f} ms".format(t * 1e3))
profile_config = {
f"{get_prim_func.__name__}-{'-'.join([str(i) for i in input_args])}": {
"fast_bitblas_top20_tune_time": fast_tune_time,
"fast_bitblas_top1_latency": cpresults[0].latency * 1e3,
"fast_bitblas_top20_latency": best.latency * 1e3,
"default_bitblas_tune_time": default_tune_time,
"default_bitblas_latency": t * 1e3,
}
}
benchmark_results.update(profile_config)
headers = [
"PrimFunc",
"Input Arguments",
"BitBLAS Top20 Tune Time",
"BitBLAS Top1 Latency",
"BitBLAS Top20 Latency",
"DefaultDLight Tune Time",
"DefaultDLight Latency",
]
col_width = (max(len(word) for row in [headers] + list(profile_config.values()) for word in row) + 2
) # padding
print("".join(word.ljust(col_width) for word in headers))
print("-" * col_width * len(headers))
for config, values in benchmark_results.items():
args = config.split("-")
func_name = args[0]
input_args = "-".join(args[1:])
row = [
func_name,
input_args,
f" {str(values['fast_bitblas_top20_tune_time'])} s",
f"{values['fast_bitblas_top1_latency']:.3f} ms",
f"{values['fast_bitblas_top20_latency']:.3f} ms",
str(values["default_bitblas_tune_time"]),
f"{values['default_bitblas_latency']:.3f} ms",
]
print("".join(word.ljust(col_width) for word in row))
|
BitBLAS/benchmark/dsl/matmul.py/0
|
{
"file_path": "BitBLAS/benchmark/dsl/matmul.py",
"repo_id": "BitBLAS",
"token_count": 3145
}
| 161 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""This is modified from https://huggingface.co/1bitLLM/bitnet_b1_58-3B/blob/main/utils_quant.py to work with BitBLAS."""
import torch
from torch import nn
import bitblas
from bitblas.cache import global_operator_cache, get_database_path
from bitblas import Matmul, MatmulConfig
from bitblas import auto_detect_nvidia_target
from logging import getLogger
logger = getLogger(__name__)
bitblas.set_log_level("INFO")
BITBLAS_TARGET = auto_detect_nvidia_target()
BITBLAS_DATABASE_PATH = get_database_path()
def weight_quant(weight, num_bits=1):
dtype = weight.dtype
weight = weight.float()
s = 1 / weight.abs().mean().clamp(min=1e-5)
result = (weight * s).round().clamp(-1, 1) / s
return result.type(dtype)
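# e.g. if mean(|w|) == 0.25 then s == 4, so weights are rounded to {-1, 0, 1}
# and rescaled by 1/s to {-0.25, 0, 0.25} (illustrative numbers).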
def activation_quant(x, num_bits=8):
dtype = x.dtype
x = x.float()
Qn = -(2**(num_bits - 1))
Qp = 2**(num_bits - 1) - 1
s = Qp / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
result = (x * s).round().clamp(Qn, Qp) / s
return result.type(dtype)
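# e.g. for num_bits == 8 the grid is [-128, 127]: each row is scaled so its
# max |x| maps to 127, rounded, then rescaled back (illustrative).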
# BitBLAS BitLinear
class BitLinear(nn.Linear):
def __init__(self, *kargs, weight_bits=1, input_bits=8, **kwargs):
super(BitLinear, self).__init__(*kargs, **kwargs)
"""
RMSNorm is placed outside BitLinear
"""
self.weight_bits = weight_bits
self.input_bits = input_bits
matmul_config = MatmulConfig(
N=self.out_features, # N dimension
K=self.in_features, # K dimension
A_dtype="int8", # activation A dtype
W_dtype="int2", # weight W dtype
accum_dtype="int32", # accumulation dtype
out_dtype="float32", # output dtype
layout="nt", # matrix layout, "nt" indicates the layout of A is non-transpose and the layout of W is transpose
with_bias=False, # bias
# configs for weight only quantization
group_size=None, # setting for grouped quantization
with_scaling=False, # setting for scaling factor
with_zeros=False, # setting for zeros
zeros_mode=None, # setting for how to calculating zeros
)
ENABLE_TUNING = True
self.bitblas_matmul = self._get_or_create_bitblas_operator(matmul_config, ENABLE_TUNING)
self.Qp = 2**(self.input_bits - 1) - 1
def _get_or_create_bitblas_operator(self, config, enable_tuning):
if global_operator_cache.size() == 0:
global_operator_cache.load_from_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
logger.info(f"Loaded {global_operator_cache.size()} operators from database.")
bitblas_matmul = global_operator_cache.get(config)
if bitblas_matmul is None:
            # disable tuning on first creation, since the operator may instead be loaded from the database
bitblas_matmul = Matmul(config, target=BITBLAS_TARGET, enable_tuning=False)
if enable_tuning:
bitblas_matmul.hardware_aware_finetune(topk=20)
global_operator_cache.add(config, bitblas_matmul)
global_operator_cache.save_into_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
print("BitBLAS Tuning done, appended operator to global_operator_cache.")
else:
print("BitBLAS Operator created.")
else:
print("BitBLAS Operator found in global_operator_cache.")
return bitblas_matmul
def post_process_weights(self):
sw = 1 / self.weight.abs().mean().clamp(min=1e-5)
self.sw = sw
quant_weight = self.weight_quant(self.weight).detach()
quant_weight = self.bitblas_matmul.transform_weight(quant_weight)
self.weight = nn.Parameter(quant_weight, requires_grad=False)
def weight_quant(self, weight):
weight = weight.float()
s = 1 / weight.abs().mean().clamp(min=1e-5)
result = (weight * s).round().clamp(-1, 1)
return result.type(torch.int8)
def activation_quant(self, x, num_bits=8):
x = x.float()
Qn = -(2**(num_bits - 1))
Qp = 2**(num_bits - 1) - 1
s = Qp / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
result = (x * s).round().clamp(Qn, Qp)
return result.type(torch.int8)
# for the correctness evaluation.
def native_forward(self, input):
quant_input = (input + (activation_quant(input, self.input_bits) - input).detach())
quant_weight = (
self.weight + (weight_quant(self.weight, self.weight_bits) - self.weight).detach())
out = nn.functional.linear(quant_input, quant_weight)
if self.bias is not None:
out += self.bias.view(1, -1).expand_as(out)
return out
def forward_fp32_simulated(self, input):
print("input: ", input)
quant_input = self.activation_quant(input, self.input_bits).detach()
quant_weight = self.weight_quant(self.weight).detach()
fp32_simulated_input = quant_input.float()
fp32_simulated_weight = quant_weight.float()
fp32_simulated_out = nn.functional.linear(fp32_simulated_input, fp32_simulated_weight)
sw = 1 / self.weight.abs().mean().clamp(min=1e-5)
Qp = 2**(self.input_bits - 1) - 1
si = Qp / input.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
        # dividing by (si * sw) in a single step can overflow to inf in some cases,
        # so divide by si and sw sequentially
out = fp32_simulated_out / si
out = out / sw
out = out.half()
if self.bias is not None:
out += self.bias.view(1, -1).expand_as(out)
return out
def forward(self, input):
quant_input = self.activation_quant(input, self.input_bits).detach()
fp32_out = self.bitblas_matmul(quant_input, self.weight)
sw = self.sw
Qp = self.Qp
si = Qp / input.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
        # dividing by (si * sw) in a single step can overflow to inf in some cases,
        # so divide by si and sw sequentially
out = fp32_out / si
out = out / sw
out = out.half()
if self.bias is not None:
out += self.bias.view(1, -1).expand_as(out)
return out
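# Usage sketch (hypothetical shapes; weights must be packed before inference):
#   layer = BitLinear(1024, 4096, bias=False).cuda().half()
#   layer.post_process_weights()  # quantize and transform weights for BitBLAS
#   y = layer(torch.randn(2, 1024, dtype=torch.float16, device="cuda"))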
# # Naive BitLinear from HuggingFace
# class BitLinear(nn.Linear):
# def __init__(self, *kargs, weight_bits=1, input_bits=8, **kwargs):
# super(BitLinear, self).__init__(*kargs, **kwargs)
# """
# RMSNorm is placed outside BitLinear
# """
# self.weight_bits = weight_bits
# self.input_bits = input_bits
# def forward(self, input):
# quant_input = input + (activation_quant(input, self.input_bits) - input).detach()
# quant_weight = self.weight + (weight_quant(self.weight, self.weight_bits) -
# self.weight).detach()
# out = nn.functional.linear(quant_input, quant_weight)
# if not self.bias is None:
# out += self.bias.view(1, -1).expand_as(out)
# return out
|
BitBLAS/integration/BitNet/utils_quant.py/0
|
{
"file_path": "BitBLAS/integration/BitNet/utils_quant.py",
"repo_id": "BitBLAS",
"token_count": 3204
}
| 162 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import bitblas
from bitblas_linear import Linear as BitBLASLinear
import torch
import time
import numpy as np
import torch.nn as nn
import pytest
torch.manual_seed(0)
@pytest.mark.parametrize(
"m, in_features, out_features, bias",
[
(1, 1024, 1024, False),
(1, 1024, 1024, True),
(1024, 1024, 1024, False),
(1024, 1024, 1024, True),
],
)
def test_correctness_static_shape(m, in_features, out_features, bias):
linear_torch = (nn.Linear(in_features, out_features, bias=bias).to(torch.float16).cuda())
linear_bitblas = BitBLASLinear(
in_features,
out_features,
bias=bias,
dtype=torch.float16,
opt_M=m,
enable_tuning=False,
).cuda()
with torch.no_grad():
linear_bitblas.weight = nn.Parameter(linear_torch.weight.clone())
if bias:
linear_bitblas.bias = nn.Parameter(linear_torch.bias.clone())
with torch.no_grad():
input_data = torch.randn(m, in_features, dtype=torch.float16).cuda()
output_torch = linear_torch(input_data)
output_bitblas = linear_bitblas(input_data)
torch.testing.assert_close(output_torch, output_bitblas, rtol=1e-1, atol=1e-2)
def profile(model, input_data):
model = model.cuda()
model.eval()
output = torch.empty(
input_data.shape[:-1] + (model.out_features,),
dtype=input_data.dtype,
device=input_data.device,
)
def get_runtime(num_repeats=1):
tic = time.time()
for _ in range(num_repeats):
_ = model(input_data, output)
torch.cuda.synchronize()
return (time.time() - tic) * 1000 / num_repeats
with torch.no_grad():
# print("Warming up ...")
st = time.time()
while time.time() - st < 1.0:
get_runtime() # warmup
warmup_runtime = get_runtime()
num_repeats = max(1, int(1000 / warmup_runtime))
times = get_runtime(num_repeats)
return np.mean(times)
@pytest.mark.parametrize(
"m, in_features, out_features, bias",
[
(1, 1024, 1024, False),
(1024, 1024, 1024, False),
],
)
def test_profile_performance(m, in_features, out_features, bias):
linear_bitblas = BitBLASLinear(
in_features,
out_features,
bias=bias,
dtype=torch.float16,
opt_M=m,
enable_tuning=False,
).cuda()
with torch.no_grad():
input_data = torch.randn(m, in_features, dtype=torch.float16).cuda()
torch_latency = profile(linear_bitblas, input_data)
bitblas_latency = linear_bitblas.bitblas_matmul.profile_latency()
print(f"torch_latency: {torch_latency}, bitblas_latency: {bitblas_latency}")
assert (abs(torch_latency - bitblas_latency) / torch_latency <
0.1), f"torch_latency: {torch_latency}, bitblas_latency: {bitblas_latency}"
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/integration/pytorch/test_bitblas_linear.py/0
|
{
"file_path": "BitBLAS/integration/pytorch/test_bitblas_linear.py",
"repo_id": "BitBLAS",
"token_count": 1386
}
| 163 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .arch_base import TileDevice
from .cuda import *
from .cpu import *
def get_arch(target: tvm.target.Target) -> TileDevice:
if target.kind.name == "cuda":
return CUDA(target)
elif target.kind.name == "llvm":
return CPU(target)
else:
raise ValueError(f"Unsupported target: {target.kind.name}")
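# Example (sketch):
#   arch = get_arch(tvm.target.Target("nvidia/nvidia-a100"))  # returns a CUDA arch wrapper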
|
BitBLAS/python/bitblas/base/roller/arch/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/arch/__init__.py",
"repo_id": "BitBLAS",
"token_count": 149
}
| 164 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Apply ScheduleRules onto an IRModule to generate default schedules without tuning,
or a space for MetaSchedule tuning
"""
from typing import List, Optional, Dict
import os
import shutil
import tempfile
import os.path as osp
import tvm
from tvm import tir
from tvm import meta_schedule as ms
from tvm.ir import IRModule
from tvm.ir.transform import PassContext, module_pass
from tvm.target import Target
from .schedule_rule import ScheduleRule
from ..base.analysis import check_func_with_dynamic
from .utils import fast_tune, fast_tune_with_dynamic_range
import logging
logger = logging.getLogger(__name__)
def _is_scheduled(func: tir.PrimFunc) -> bool:
if not isinstance(func, tir.PrimFunc):
return False
if not func.attrs:
return False
if "tir.is_scheduled" not in func.attrs:
return False
return func.attrs["tir.is_scheduled"] == 1
@module_pass(opt_level=0, name="ApplyDefaultSchedule")
class ApplyDefaultSchedule: # pylint: disable=too-few-public-methods
"""A IRModule pass that applies a list of ScheduleRules to all PrimFuncs in the module."""
def __init__(self, *rules: ScheduleRule):
"""Construct a new ApplyDefaultSchedule pass.
Parameters
----------
*rules : ScheduleRule
The ScheduleRules to apply to all PrimFuncs in the module.
"""
self.rules = list(rules)
def transform_module( # pylint: disable=missing-function-docstring
self,
mod: IRModule,
_: PassContext,
) -> IRModule:
target = Target.current(allow_none=False)
updated_functions = {}
for g_var, func in mod.functions_items():
if isinstance(func, tir.PrimFunc) and not _is_scheduled(func):
sch = _apply_rules(func, target, self.rules, tunable=False)
if sch is not None:
assert len(sch) == 1
updated_functions[g_var] = (sch[0].mod["main"].with_attr("tir.is_scheduled", 1))
for g_var, func in updated_functions.items():
mod[g_var] = func
return mod
@module_pass(opt_level=0, name="ApplyFastTuning")
class ApplyFastTuning: # pylint: disable=too-few-public-methods
"""A IRModule pass that applies a list of ScheduleRules to all PrimFuncs in the module."""
def __init__(
self,
topk: int = 10,
target: Optional[Target] = None,
parallel_build: bool = True,
        meta_database_dir: Optional[str] = None,
whitelist: Optional[List[str]] = None,
dynamic_range: Optional[Dict[str, List[int]]] = None,
):
"""Construct a new ApplyFastTuning pass.
Parameters
----------
meta_database : str
The path of database.
dynamic_range : Dict[str, List[int]]
Use for generate kernel based on dynamic range.
"""
if whitelist is None:
whitelist = []
if dynamic_range is None:
dynamic_range = {}
self.topk = topk
self.target = Target.current() if target is None else target
self.parallel_build = parallel_build
self.meta_database_dir = meta_database_dir
self.whitelist = whitelist
self.dynamic_range = dynamic_range
self.temp_dir = tempfile.TemporaryDirectory()
path_workload = osp.join(self.temp_dir.name, "database_workload.json")
path_tuning_record = osp.join(self.temp_dir.name, "database_tuning_record.json")
self.cache_meta_database = ms.database.JSONDatabase(
path_workload, path_tuning_record, module_equality="structural")
def _in_white_list(self, func_name: str) -> bool:
if len(self.whitelist) == 0:
return True
return any([name in func_name for name in self.whitelist])
def transform_module( # pylint: disable=missing-function-docstring
self,
mod: IRModule,
_: PassContext,
) -> IRModule:
target = self.target
updated_functions = {}
for g_var, func in mod.functions_items():
if isinstance(func, tir.PrimFunc) and not _is_scheduled(func):
if not self._in_white_list(g_var.name_hint):
continue
normalize_mod_func_ = tvm._ffi.get_global_func("tvm.meta_schedule.normalize_mod")
_normalized_func_mod = normalize_mod_func_(func)
if self.cache_meta_database.has_workload(_normalized_func_mod):
tuning_record = self.cache_meta_database.query_tuning_record(
_normalized_func_mod,
target,
g_var.name_hint,
)
if tuning_record:
trace = tuning_record.trace
sch = tvm.tir.Schedule(func)
trace.apply_to_schedule(sch, remove_postproc=False)
updated_functions[g_var] = sch.mod["main"].with_attr("tir.is_scheduled", 1)
continue
if check_func_with_dynamic(func):
dispatch_mod = fast_tune_with_dynamic_range(
func,
target=target,
topk=self.topk,
parallel_build=self.parallel_build,
global_symbol=g_var.name_hint,
dynamic_range=self.dynamic_range,
)
if dispatch_mod:
for g, f in dispatch_mod.functions_items():
if g.name_hint == g_var.name_hint:
# avoid duplicated global symbol
updated_functions[g_var] = f.without_attr(
"global_symbol").with_attr("tir.is_scheduled", 1)
else:
updated_functions[g] = f.with_attr("tir.is_scheduled", 1)
                        # cannot reuse the meta database here, as the dispatch module
                        # cannot be recovered from a single trace
workload = self.cache_meta_database.commit_workload(_normalized_func_mod)
else:
                    # otherwise the function is static-shape; tune it directly
_, best = fast_tune(
func,
target=target,
topk=self.topk,
parallel_build=self.parallel_build,
)
if best is not None:
updated_functions[g_var] = best.sch.mod["main"].with_attr(
"tir.is_scheduled", 1)
workload = self.cache_meta_database.commit_workload(_normalized_func_mod)
# only record the best schedule
self.cache_meta_database.commit_tuning_record(
ms.database.TuningRecord(
best.sch.trace,
workload,
[best.latency],
target,
ms.arg_info.ArgInfo.from_prim_func(func=best.sch.mod["main"]),
))
for g_var, func in updated_functions.items():
mod[g_var] = func
# copy database
if self.meta_database_dir is not None:
if not osp.exists(self.meta_database_dir):
os.makedirs(self.meta_database_dir)
# TODO(lei): maybe another way to copy the database
shutil.copytree(self.temp_dir.name, self.meta_database_dir, dirs_exist_ok=True)
return mod
def __del__(self):
# clean up the temp cache
self.temp_dir.cleanup()
def _apply_rules(
func: tir.PrimFunc,
target: Target,
rules: List[ScheduleRule],
tunable: bool,
) -> Optional[List[tir.Schedule]]:
for rule in rules:
try:
space = rule.apply(func, target, tunable)
except Exception:
logger.debug(f"[BitBLAS][Error] applying rule {rule} failed")
space = None
if space is None:
continue
if isinstance(space, tir.Schedule):
space = [space]
return space
return None
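# Typical usage (sketch; assumes a lowered IRModule `mod` and a CUDA target context):
#   with tvm.target.Target("cuda"):
#       mod = ApplyFastTuning(topk=10)(mod)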
|
BitBLAS/python/bitblas/base/transform.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/transform.py",
"repo_id": "BitBLAS",
"token_count": 4176
}
| 165 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""A GEMM schedule rule for GPU operators."""
from typing import Literal, Optional, List
from tvm import tir, DataType
from tvm.target import Target
from ..base.roller.rasterization import NoRasterization
from ..base import analysis
from .base import GPUScheduleRule
from .matmul_mma_dequantize import MatmulTensorizationMMAWithDequantizeInfo
from ..base.analysis import get_coalesced_veclen
from .matmul_analysis import (
auto_inline_consumer_chain,
is_transpose_block,
is_identity_block,
_collect_producers,
inline_transpose_block,
auto_inline_producers,
get_index_map,
get_reduction_blocks,
get_dequantize_block,
normalize_to_matmul,
get_propagate_map,
)
def get_index_map_3d(index_map, l=16, r=16): # noqa: E741
def index_map_3d(b, i, j):
return (
b,
i // l,
j // r,
*index_map(i % l, j % r),
)
return index_map_3d
def get_index_map_5d(index_map):
"""
for layout transformed gemm, the index map should be 5d
"""
def index_map_5d(b, i, j, ii, jj):
return (
b,
i,
j,
*index_map(ii, jj),
)
return index_map_5d
def get_warp_index_map(index_map, l=16, r=16, is_5d=False): # noqa: E741
if is_5d:
return get_index_map_5d(index_map)
return get_index_map_3d(index_map, l, r)
class MatmulTensorizationMMA(GPUScheduleRule):
"""
The schedule rule for float16 tensor core matmul computation.
func with attr 'dlight.do_not_tensorize' will not be tensorized.
"""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Optional[tir.Schedule]:
if "dequantize_info" in func.attrs:
dequantize_rule = MatmulTensorizationMMAWithDequantizeInfo()
return dequantize_rule.apply(func, target, False)
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
# We first inline all transpose blocks for later analysis of transposed A and B
blocks = inline_transpose_block(sch, blocks)
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
dequantize_block = get_dequantize_block(sch, blocks)
main_block = reduction_blocks[0]
main_block_stmt = sch.get(main_block)
# Supported data types:
# fp16, fp16, fp16: fp16 precision
# fp16, fp16, fp32: fp16 mixed precision
dtype_a = main_block_stmt.reads[0].buffer.dtype
dtype_b = main_block_stmt.reads[1].buffer.dtype
dtype_c = main_block_stmt.writes[0].buffer.dtype
if dtype_a != dtype_b:
return None
# Get index maps
index_maps = get_index_map(main_block_stmt)
if index_maps is None:
return None
matmul_index_map, a_index_map, b_index_map, c_index_map = index_maps
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# Tensorization by hardware intrinsics
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group, shared_16x16_to_mma_32x8_layout,
)
# tile size
block_m, block_n, block_k = 128, 128, 32
# tensor core intrinsic size
micro_size_m, micro_size_n, micro_size_k = 16, 16, 16
# thread size
# thread_x == warp_size
thread_z, thread_y, thread_x = 2, 2, 32
vector_size = 8
unroll_depth = 4 # noqa: F841
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]
block = sch.reindex(main_block, ("read", 0))
sch.transform_layout(block, ("write", 0), a_index_map)
is_transpose_a = is_transpose_block(sch.get(block))
block = sch.reindex(main_block, ("read", 1))
sch.transform_layout(block, ("write", 0), b_index_map)
is_transpose_b = is_identity_block(sch.get(block))
block = sch.reindex(main_block, ("write", 0))
sch.transform_layout(block, ("read", 0), c_index_map)
sch.transform_block_layout(main_block, matmul_index_map)
batch, i, j, k = sch.get_loops(main_block)
swizzle_factor_for_l2_m = [1, None]
swizzle_factor_for_l2_n = [1, None]
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
swizzle_factor_for_l2_m[0] * block_m,
swizzle_factor_for_l2_n[0] * block_n,
block_k,
],
)
# Step 3. Reorder loops for tiling
# Step 3.1 inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_m])
j, j_inner = sch.split(j, factors=[None, micro_size_n])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = main_block
block_outer = sch.blockize(i_inner)
# Step 3.2 outer loops for tiling
# split factors for i, j, and k
micro_block_cnt_in_warp_m = block_m // thread_z // micro_size_m
micro_block_cnt_in_warp_n = block_n // thread_y // micro_size_n
micro_block_cnt_in_warp_k = block_k // micro_size_k
i_factors = swizzle_factor_for_l2_m + [thread_z, micro_block_cnt_in_warp_m]
j_factors = swizzle_factor_for_l2_n + [thread_y, micro_block_cnt_in_warp_n]
k_factors = [None, micro_block_cnt_in_warp_k]
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, factors=k_factors)
sch.reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3)
block_axis = sch.fuse(batch, i0, j0, i1, j1)
sch.bind(block_axis, "blockIdx.x")
sch.bind(i2, "threadIdx.z")
sch.bind(j2, "threadIdx.y")
# Step 4. Read/write to shared mem and register
def fetch_input(block_outer, read_buffer_idx, tensor_name: Literal["A", "B"], is_transpose):
# 1) Read to shared memory
block_read_smem = sch.cache_read(block_outer, read_buffer_idx, "shared.dyn")
sch.compute_at(block_read_smem, k0)
auto_inline_producers(sch, block_read_smem,
[dequantize_block] if dequantize_block else [])
# For transposed read, we directly load transposed tensor from global
# Then use ldmatrix.trans to handle transpose later
if (tensor_name == "A" and is_transpose) or (tensor_name == "B" and not is_transpose):
                # special handling for transposed reads (for NN matmul or TT matmul)
v0, v1 = sch.get_loops(block_read_smem)[-2:]
sch.reorder(v1, v0)
sch.transform_layout(block_read_smem, ("write", 0), lambda b, i, j: (b, j, i))
# bind loops
fused = sch.fuse(*sch.get_loops(block_read_smem)[-2:])
f0, f1, f2, f3, f4 = sch.split(fused, [None, thread_z, thread_y, thread_x, vector_size])
sch.bind(f1, "threadIdx.z")
sch.bind(f2, "threadIdx.y")
sch.bind(f3, "threadIdx.x")
sch.vectorize(f4)
# swizzling
sch.annotate(block_read_smem, ann_key="permuted_layout", ann_val=1)
# 2) Read to register
block_read_reg = sch.cache_read(block_outer, read_buffer_idx, "warp")
sch.compute_at(block_read_reg, k1)
# bind_loops
micro_size_spatial = micro_size_m if tensor_name == "A" else micro_size_n
micro_size_1, micro_size_2 = ((micro_size_spatial,
micro_size_k) if not is_transpose else
(micro_size_k, micro_size_spatial))
v00, v01 = sch.split(sch.get_loops(block_read_reg)[-2], [None, micro_size_1])
v10, v11 = sch.split(sch.get_loops(block_read_reg)[-1], [None, micro_size_2])
sch.reorder(v00, v10, v01, v11)
# reorder read axis to match the layout of ldmatrix
sch.transform_layout(
block_read_reg,
("write", 0),
lambda v0, v1, v2: (
v0,
v1 // micro_size_1,
v2 // micro_size_2,
*shared_16x16_to_mma_32x8_layout(v1 % micro_size_1, v2 % micro_size_2),
),
)
# swizzling
mma_read_block = sch.blockize(sch.get_loops(block_read_reg)[-2])
sch.annotate(mma_read_block, ann_key="permuted_layout", ann_val=1)
return block_read_smem, block_read_reg
block_read_a, block_read_reg_a = fetch_input(block_outer, 0, "A", is_transpose_a)
block_read_b, block_read_reg_b = fetch_input(block_outer, 1, "B", is_transpose_b)
# Write to register, and then smem
def store_output(block_outer, write_buffer_idx):
# 1) Write to shared memory
block_write_smem = sch.cache_write(block_outer, write_buffer_idx, "shared.dyn")
sch.reverse_compute_at(block_write_smem, block_axis)
auto_inline_consumer_chain(sch, block_write_smem)
# bind loops
fused = sch.fuse(*sch.get_loops(block_write_smem)[-2:])
f0, f1, f2 = sch.split(fused, [None, thread_x, vector_size])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
# 2) Write to register
block_write_reg = sch.cache_write(block_outer, write_buffer_idx, "warp")
# bind loops
v0, v1, v2 = sch.get_loops(block_write_reg)[-3:]
v11, v12, v13 = sch.split(v1, factors=[thread_z, None, micro_size_m])
v21, v22, v23 = sch.split(v2, factors=[thread_y, None, micro_size_n])
sch.reorder(v11, v21, v12, v22, v13, v23)
sch.bind(v11, "threadIdx.z")
sch.bind(v21, "threadIdx.y")
# reorder write axis to match the layout of ldmatrix
sch.transform_layout(
block_write_reg,
("read", 0),
lambda v0, v1, v2: (
v0,
v1 // micro_size_m,
v2 // micro_size_n,
*shared_16x16_to_mma_32x8_layout(v1 % micro_size_m, v2 % micro_size_n),
),
)
return block_write_smem, block_write_reg
_, block_write_reg = store_output(block_outer, 0)
# Step 5. Schedule tensor core computation
block_init = sch.decompose_reduction(block_outer, k0)
block_init_inner = sch.get_child_blocks(block_init)[0]
intrin_group = get_mma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
a_dtype=str(dtype_a),
b_dtype=str(dtype_b),
out_dtype=str(dtype_c),
trans_a=is_transpose_a,
trans_b=is_transpose_b,
not_use_mma_store_intrinic=False,
)
sch.tensorize(sch.get_loops(block_init_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(block_read_reg_a)[-2], intrin_group["load_a"])
sch.tensorize(sch.get_loops(block_read_reg_b)[-2], intrin_group["load_b"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
sch.tensorize(sch.get_loops(block_write_reg)[-2], intrin_group["store"])
# Step 6. Async pipeline
sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 3])
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0])
# Step 7. Handle dequantize block
# Now we just add a dummy kernel to compute dequantize
if dequantize_block is not None:
auto_inline_producers(sch, dequantize_block)
loops = sch.get_loops(dequantize_block)
loop = sch.fuse(*loops)
v0, v1, v2, v3 = sch.split(loop, [None, 128, 2, 4])
sch.bind(v0, "blockIdx.x")
sch.bind(v1, "threadIdx.x")
sch.unroll(v2)
sch.vectorize(v3)
return sch
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> Optional[tir.Schedule]:
if "dequantize_info" in func.attrs:
dequantize_rule = MatmulTensorizationMMAWithDequantizeInfo()
return dequantize_rule.apply_config(func, config)
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_mma_intrin_group,)
import_source: List[str] = []
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
output_blocks = [sch.get(block) for block in sch.get_output_blocks(root_block)]
def check_require_cache(func: tir.PrimFunc, config):
conditions: List[bool] = []
# check if has dynamic symbolic
def check_has_dynamic(func: tir.PrimFunc):
for param in func.params:
if param not in func.buffer_map:
continue
arg = func.buffer_map[param]
for i in arg.shape:
if isinstance(i, tir.Var):
return True
return False
conditions.append(check_has_dynamic(func))
# check if has post process
conditions.append(sch.get(main_block) not in output_blocks)
# check if not use async copy
conditions.append(config.use_async is False)
return any(conditions)
cache_write_required = check_require_cache(func, config=config)
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]/B[S, K, J]
if not (func.attrs is not None and "dlight.tensorcore_prenormlized" in func.attrs.keys()):
sch = normalize_to_matmul(sch, main_block, ["a", "a", "a"])
shared_scope = config.shared_scope
intrin_info = config.intrin_info
intrin_group = get_mma_intrin_group(
load_scope=shared_scope,
store_scope=shared_scope if cache_write_required else "global",
a_dtype=intrin_info.in_dtype,
b_dtype=intrin_info.in_dtype,
out_dtype=intrin_info.out_dtype,
trans_a=intrin_info.trans_a,
trans_b=intrin_info.trans_b,
smooth_a=intrin_info.smooth_a,
smooth_b=intrin_info.smooth_b,
not_use_mma_store_intrinic=False,
)
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
warp_row_tiles = config.warp[0]
warp_col_tiles = config.warp[1]
block_row_warps = config.block[0] // warp_row_tiles
block_col_warps = config.block[1] // warp_col_tiles
stage = config.pipeline_stage
use_async = config.use_async
chunk = config.rstep[0]
# tensor core intrinsic size
micro_size_x, micro_size_y, micro_size_k = intrin_group["micro_kernel"]
# get the axis for layout transform
def get_axis(l, r, trans): # noqa: E741
return (r, l) if trans else (l, r) # noqa: E741
a_lr = get_axis(micro_size_x, micro_size_k, intrin_info.trans_a)
b_lr = get_axis(micro_size_k, micro_size_y, intrin_info.trans_b)
def can_enable_swizzle(dtype: str, smooth: bool):
            # inject_permuted_layout currently only supports float16 and int8
if dtype == "float16" or dtype == "int8":
if chunk * DataType(dtype).bits != 512:
# currently the swizzle rule only support 512 bit.
return False
# if we use smooth layout, we don't need to do swizzling
return not smooth
return False
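        # Worked example of the 512-bit rule above (illustrative): for float16
        # (16-bit), chunk must be 32 (32 * 16 == 512); for int8 (8-bit), chunk must be 64.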
can_swizzle_a = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_a)
can_swizzle_b = can_enable_swizzle(intrin_info.in_dtype, intrin_info.inter_transform_b)
warp_size = 32
i_factors, j_factors, k_factors = (
[None, 1, block_row_warps, warp_row_tiles // micro_size_x],
[1, None, block_col_warps, warp_col_tiles // micro_size_y],
[None, chunk // micro_size_k],
)
num_ty = i_factors[2]
num_tz = j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3)
block_idy = sch.fuse(i0, j0)
block_idx = sch.fuse(i1, j1)
thread_idy = i2
thread_idz = j2
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
sch.bind(thread_idz, "threadIdx.z")
# rewrite smooth layout of shared memory
def smooth_smem_layout_rewrite(block, scope, l=16, r=16, enable=True): # noqa: E741
if not enable:
return
sch.transform_layout(
block,
scope,
lambda b, i, j: (
b,
i // l,
j // r,
i % l,
j % r,
),
)
smooth_smem_layout_rewrite(
block_outer, ("read", 0), *a_lr, enable=intrin_info.inter_transform_a)
smooth_smem_layout_rewrite(
block_outer, ("read", 1), *b_lr, enable=intrin_info.inter_transform_b)
smooth_smem_layout_rewrite(block_outer, ("write", 0), enable=True)
def fetch_to_shared(block, idx, vec_len, can_swizzle=False, is_smooth=False, trans=False):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0, preserve_unit_loops=True)
ndim = len(sch.get(block_read).iter_vars)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
f_0, f_1, f_2, f_3, f_4 = sch.split(
fused, factors=[num_ty, num_tz, None, warp_size, vec_len])
sch.bind(f_3, "threadIdx.x")
sch.bind(f_1, "threadIdx.z")
sch.bind(f_0, "threadIdx.y")
sch.vectorize(f_4)
sch.unroll(f_2)
# Apply Swizzling
sch.annotate(block_read, ann_key="permuted_layout", ann_val=can_swizzle)
# if not, apply padding to alleviate bank conflict
if not (can_swizzle or is_smooth):
pad_offset = 8 if intrin_info.in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=pad_offset)
sch.annotate(f_2, "pragma_unroll_explicit", False)
return block_read
if len(config.vectorize.values()) < 2:
return None
a_g2s = fetch_to_shared(
block_outer,
0,
vec_len=list(config.vectorize.values())[0],
can_swizzle=can_swizzle_a,
is_smooth=intrin_info.smooth_a,
trans=intrin_info.trans_a,
)
b_g2s = fetch_to_shared(
block_outer,
1,
vec_len=list(config.vectorize.values())[1],
can_swizzle=can_swizzle_b,
is_smooth=intrin_info.smooth_b,
trans=intrin_info.trans_b,
)
# rewrite global smooth layout
def smooth_gmem_layout_rewrite(sch, block, enable=True, trans=False, matrix_name="A"):
if not enable:
return
# step1: find the first producer block
            # Note: we assume the layout propagation happens in the first producer block;
            # otherwise the layout transform has no effect, as it would transform both
            # the read and the write buffer
producers = _collect_producers(sch, block)
g2s_block = a_g2s if matrix_name == "A" else b_g2s
propagate_block: tir.Block = (producers[-1] if len(producers) > 0 else g2s_block)
# step2: transform the layout with inverse permutation
intra_indexmap, _ = get_propagate_map(
trans=trans, dtype=intrin_info.in_dtype, matrix_name=matrix_name)
def inverse_permutation(i, j, ii, jj):
return (i, j, *intra_indexmap.map_indices([ii, jj]))
sch.transform_layout(propagate_block, ("read", 0), inverse_permutation)
smooth_gmem_layout_rewrite(
sch, a_g2s, intrin_info.smooth_a, intrin_info.trans_a, matrix_name="A")
smooth_gmem_layout_rewrite(
sch, b_g2s, intrin_info.smooth_b, intrin_info.trans_b, matrix_name="B")
auto_inline_producers(sch, a_g2s)
auto_inline_producers(sch, b_g2s)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "warp")
B_mat = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_mat, k1)
sch.compute_at(B_mat, k1)
# create write cache to store matrix from wmma fragments to shared memory and global memory
if cache_write_required:
accumulator_shared_to_global = sch.cache_write(block_outer, 0, shared_scope)
store = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(store, j2)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, micro_size_x], preserve_unit_iters=False)
j0, j1 = sch.split(j, factors=[None, micro_size_y], preserve_unit_iters=False)
sch.reorder(i0, j0, i1, j1)
if cache_write_required:
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
sch.reverse_compute_at(
accumulator_shared_to_global,
sch.get_loops(store)[-5],
preserve_unit_loops=True,
)
vec_len = get_coalesced_veclen(sch.get(accumulator_shared_to_global))
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-5:])
f0, f1, f2 = sch.split(fused, factors=[None, warp_size, vec_len])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
sch.unroll(f0)
sch.annotate(f0, "pragma_unroll_explicit", False)
else:
auto_inline_consumer_chain(sch, store)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
index_map_a, index_map_b, index_map_c = intrin_group["index_map"]
sch.transform_layout(
A_mat,
("write", 0),
get_warp_index_map(index_map_a, *a_lr, intrin_info.inter_transform_a),
)
sch.transform_layout(
B_mat,
("write", 0),
get_warp_index_map(index_map_b, *b_lr, intrin_info.inter_transform_b),
)
sch.transform_layout(
store,
("read", 0),
get_warp_index_map(index_map_c, is_5d=True),
)
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, a_lr[0]])
j0, j1 = sch.split(j, factors=[None, a_lr[1]])
sch.reorder(i0, j0, i1, j1)
ba = sch.blockize(i1)
sch.annotate(ba, ann_key="permuted_layout", ann_val=can_swizzle_a)
sch.tensorize(ba, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, b_lr[0]])
j0, j1 = sch.split(j, factors=[None, b_lr[1]])
sch.reorder(i0, j0, i1, j1)
bb = sch.blockize(i1)
sch.annotate(bb, ann_key="permuted_layout", ann_val=can_swizzle_b)
sch.tensorize(bb, intrin_group["load_b"])
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
tensorize_init_store_compute()
if stage > 1:
sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, stage - 1])
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
if use_async:
sch.annotate(k0, "software_pipeline_async_stages", [0])
        # plan rasterization
if not isinstance(config.rasterization_plan, NoRasterization):
device_func, invoke_func = config.rasterization_plan.get_code()
import_source.append(device_func)
sch.annotate(
sch.get_loops(block_init_c)[-2],
ann_key="inject_customized_code_prepend",
ann_val=invoke_func)
# plan import source
if len(import_source) > 0:
sch.annotate(
thread_idz,
ann_key="pragma_import_c",
ann_val=("\n").join(import_source),
)
return sch
|
BitBLAS/python/bitblas/gpu/matmul_mma.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/matmul_mma.py",
"repo_id": "BitBLAS",
"token_count": 13885
}
| 166 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from bitblas.gpu.matmul_analysis import get_propagate_map
from ..operator import TransformKind
from typing import Literal
from tvm import te, IRModule
def select_implementation(
M: int,
N: int,
datatype: Literal["float16"] = "float16",
transpose_matrix: bool = True,
group_size: int = -1,
propagate_kind: TransformKind = TransformKind.NonTransform,
target_instruction: Literal["nvidia-mma"] = "nvidia-mma",
):
if target_instruction != "nvidia-mma":
raise ValueError("Currently only support nvidia-mma instruction")
if propagate_kind < TransformKind.IntraWarpTransform:
raise ValueError("Currently only support propagate_kind >= IntraWarpTransform")
if transpose_matrix is not True:
raise ValueError("Currently only support transpose_matrix == True")
    # This is a trick to get the basic tile size for the current datatype:
    # for NVIDIA Tensor Core instructions, the basic tile is 16x16 for float16 and 16x32 for int8
l = r = 16 # noqa: E741
if datatype in ["int8", "e4m3_float8", "e5m2_float8"]:
l, r = 16, 32 # noqa: E741
if group_size == -1:
group_size = N
intra_index_map, inverse_indexmap = get_propagate_map(
transpose_matrix, dtype=datatype, matrix_name=propagate_kind)
inp = te.placeholder((M, N // group_size), name="inp", dtype=datatype)
def fcompute(n, k):
rl, rr = n, k
warp_i, warp_j = rl % l, rr % r
spatial_i, spatial_j = rl // l, rr // r
if propagate_kind >= TransformKind.IntraWarpTransform:
warp_i, warp_j = intra_index_map.map_indices([warp_i, warp_j])
new_index = (spatial_i * l + warp_i, (spatial_j * r + warp_j) // group_size)
return inp[new_index]
inp_prmt = te.compute(
(M, N // group_size),
fcompute,
name="intra_warp_permutate",
)
args = [inp, inp_prmt]
func = te.create_prim_func(args)
return IRModule.from_expr(func)
|
BitBLAS/python/bitblas/ops/impl/param_permutate_impl.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/impl/param_permutate_impl.py",
"repo_id": "BitBLAS",
"token_count": 836
}
| 167 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
def match_global_kernel(source: str) -> int:
    # `__launch_bounds__(N)` is optional, so match it with an optional non-capturing
    # group (the previous character-class form only matched a single character)
    pattern = r"__global__\s+void\s+(?:__launch_bounds__\(\d+\)\s+)?\w+"
    matched = re.findall(pattern, source)
    assert len(matched) > 1  # may have statement before kernel
    return source.index(matched[0])
def tensor_replace_dp4a(source: str) -> str:
# as under block reduction in tir dsl, the dp4a tensorize will fail, so we should do dp4a in post processor.
# TODO(lei): this is a stuff that should be fixed in the tvm in the future
pattern = r"""for\s*\(int\s*(?P<k_var>\w+)\s*=\s*0;\s*\1\s*<\s*4;\s*\+\+\1\)\s*\{\s*(?P<c_var>\w+)\[0\]\s*=\s*\(\2\[0\]\s*\+\s*\(\(\(int\)(?P<a_var>\w+)\[\(\((?P<idx_a_var>\w+)\s*\*\s*4\)\s*\+\s*\1\)\]\)\s*\*\s*\(\(int\)(?P<b_var>\w+)\[\(\((?P<idx_b_var>\w+)\s*\*\s*4\)\s*\+\s*\1\)\]\)\)\);\s*\}"""
replacement = (r"""\2[0] = __dp4a(*(int *)&\3[((\4 * 4))],*(int *)&\5[((\6 * 4))], \2[0]);""")
source = re.sub(pattern, replacement, source)
return source
def tensor_remove_make_int4(source: str) -> str:
# remove make_int4 with 16 signed char arguments
# TODO(lei): this is a stuff that should be fixed in the tvm in the future
source = source.replace(
"make_int4((signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0, (signed char)0)",
"make_int4(0, 0, 0, 0)",
)
return source
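# --- Illustrative usage sketch (not part of the original file) ---
# The CUDA source string below is made up for demonstration purposes:
if __name__ == "__main__":
    demo_src = 'extern "C" __global__ void __launch_bounds__(128) add_kernel(float* A) {}'
    offset = match_global_kernel(demo_src)
    print(demo_src[offset:])  # prints the text starting at the kernel signature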
|
BitBLAS/python/bitblas/utils/post_process.py/0
|
{
"file_path": "BitBLAS/python/bitblas/utils/post_process.py",
"repo_id": "BitBLAS",
"token_count": 735
}
| 168 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import tvm
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.base.roller.arch import CUDA
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
from bitblas.gpu import Matmul
from bitblas.ops.impl.convolution2d_impl import conv2d_nhwc_hwio, conv2d_nhwc_ohwi
from bitblas.base.utils import apply_and_build
import time
benchmark_sets = [
# (prim_func, input_args, default_bitblas_schedule),
(conv2d_nhwc_hwio, (128, 64, 224, 224, 3, 7, 7, 2, 1, 3, "float16", "float16"), Matmul),
(conv2d_nhwc_ohwi, (128, 64, 56, 56, 64, 3, 3, 1, 1, 1, "float16", "float16"), Matmul),
(conv2d_nhwc_hwio, (128, 64, 56, 56, 64, 1, 1, 1, 1, 1, "float16", "float16"), Matmul),
(conv2d_nhwc_ohwi, (128, 64, 56, 56, 64, 1, 1, 1, 1, 1, "float16", "float16"), Matmul),
(conv2d_nhwc_ohwi, (128, 128, 28, 28, 128, 3, 3, 1, 1, 1, "float16", "float16"), Matmul),
(conv2d_nhwc_hwio, (128, 256, 14, 14, 128, 3, 3, 2, 1, 1, "float16", "float16"), Matmul),
(conv2d_nhwc_ohwi, (128, 256, 14, 14, 128, 1, 1, 2, 1, 1, "float16", "float16"), Matmul),
]
benchmark_results = {}
for get_prim_func, input_args, d_schedule in benchmark_sets:
ir_module = get_prim_func(*input_args)
func = ir_module["main"]
target = tvm.target.Target("nvidia/nvidia-a100")
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception as e:
print(f"Failed to get tensorized function and tags: {e}")
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(20)
tune_start = time.time()
cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)
fast_tune_time = time.time() - tune_start
print("[BitBLAS] The best latency of top 1 is {:.3f} ms".format(cpresults[0].latency * 1e3))
print("[BitBLAS] The best latency of top 20 is {:.3f} ms".format(best.latency * 1e3))
# evaluate the performance of the default schedule
rule = d_schedule()
default_tune_start = time.time()
sch_default = rule.apply(func, target, False)
with tvm.transform.PassContext(config={"tir.use_async_copy": True}):
mod_default = tvm.build(sch_default.mod["main"], target="cuda")
default_tune_time = time.time() - default_tune_start
args = func.buffer_map.values()
profile_tensors = []
for arg in args:
profile_tensors.append(
tvm.nd.array(
np.random.uniform(0, 1, [int(i) for i in arg.shape]).astype(arg.dtype),
device=arch.device,
))
timer_cuda_mod = mod_default.time_evaluator(mod_default.entry_name, arch.device, number=5)
t = timer_cuda_mod(*profile_tensors).mean
print("Time cost of BitBLAS default schedule: {:.3f} ms".format(t * 1e3))
profile_config = {
f"{get_prim_func.__name__}-{'-'.join([str(i) for i in input_args])}": {
"fast_bitblas_top20_tune_time": fast_tune_time,
"fast_bitblas_top1_latency": cpresults[0].latency * 1e3,
"fast_bitblas_top20_latency": best.latency * 1e3,
"default_bitblas_tune_time": default_tune_time,
"default_bitblas_latency": t * 1e3,
}
}
benchmark_results.update(profile_config)
headers = [
"PrimFunc",
"Input Arguments",
"FastDLight Top20 Tune Time",
"FastDLight Top1 Latency",
"FastDLight Top20 Latency",
"DefaultDLight Tune Time",
"DefaultDLight Latency",
]
col_width = (max(len(word) for row in [headers] + list(profile_config.values()) for word in row) + 2
) # padding
print("".join(word.ljust(col_width) for word in headers))
print("-" * col_width * len(headers))
for config, values in benchmark_results.items():
args = config.split("-")
func_name = args[0]
input_args = "-".join(args[1:])
row = [
func_name,
input_args,
f" {str(values['fast_bitblas_top20_tune_time'])} s",
f"{values['fast_bitblas_top1_latency']:.3f} ms",
f"{values['fast_bitblas_top20_latency']:.3f} ms",
str(values["default_bitblas_tune_time"]),
f"{values['default_bitblas_latency']:.3f} ms",
]
print("".join(word.ljust(col_width) for word in row))
|
BitBLAS/testing/python/dsl/test_auto_normalized_tensorcore.py/0
|
{
"file_path": "BitBLAS/testing/python/dsl/test_auto_normalized_tensorcore.py",
"repo_id": "BitBLAS",
"token_count": 1995
}
| 169 |
date ; hostname ; pwd
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
EXP_LR_ARRAY=(1e-5 1e-5 2e-5 2e-5 1e-5 1e-5 2e-5 2e-5)
EXP_LF_ARRAY=(True False True False True False True False)
EXP_GN_ARRAY=(cifar10 cifar10 cifar10 cifar10 cifar100 cifar100 cifar100 cifar100)
EXP_RB_ARRAY=(288 224 288 224 288 224 288 224)
for i in {0..7}
do
EXP_LR=${EXP_LR_ARRAY[$i]}
EXP_LF=${EXP_LF_ARRAY[$i]}
EXP_GN=${EXP_GN_ARRAY[$i]}
EXP_RB=${EXP_RB_ARRAY[$i]}
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_NODES, $EXP_LR, $EXP_LF, $EXP_GN, $EXP_RB
RUN_NAME=""$EXP_LR"_"$EXP_LF"_10"
python run_cifar.py with run_name=$RUN_NAME learning_rate=$EXP_LR load_flag=$EXP_LF group_name=$EXP_GN resolution_before=$EXP_RB
RUN_NAME=""$EXP_LR"_"$EXP_LF"_100"
python run_cifar.py with run_name=$RUN_NAME learning_rate=$EXP_LR load_flag=$EXP_LF group_name=$EXP_GN resolution_before=$EXP_RB max_epoch=100
done
date
|
BridgeTower/scripts/ftfpt_cifar.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_cifar.sh",
"repo_id": "BridgeTower",
"token_count": 455
}
| 170 |
from ..datasets import F30KCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
from torch.utils.data import DataLoader
class F30KCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return F30KCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return F30KCaptionKarpathyDataset
@property
def dataset_name(self):
return "f30k"
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=0,
pin_memory=True,
collate_fn=self.train_dataset.collate,
)
return loader
def val_dataloader(self):
loader = DataLoader(
self.val_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
collate_fn=self.val_dataset.collate,
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
collate_fn=self.test_dataset.collate,
)
return loader
|
BridgeTower/src/datamodules/f30k_caption_karpathy_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/f30k_caption_karpathy_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 716
}
| 171 |
from .base_dataset import BaseDataset
class VQAv2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
# names = ["vqav2_train", "vqav2_trainable_val"] # ViLT
# names = ["vqav2_train", "vqav2_val"] # METER
## We fix a bug in ViLT & METER's write_vqa.py.
names = ["vqav2_train_fix", "vqav2_val_fix"] # METER_fix
# names = ["vqav2_train_fix", "vqav2_val_fix", "vgqa_coco_train", "vgqa_coco_val"] # + vgqa coco only
# names = ["vqav2_train_fix", "vqav2_val_fix", "vgqa_train", "vgqa_val"] # + vgqa all
elif split == "val":
# names = ["vqav2_rest_val"] # ViLT
# names = ["vqav2_val"] # METER
names = ["vqav2_val_fix"] # METER_fix
elif split == "test":
names = ["vqav2_test"] # evaluate the test-dev and test-std
# names = ["vqav2_test-dev"] # only evaluate the test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
image_tensor = self.get_image(index)["image"]
text = self.get_text(index)["text"]
index, question_index = self.index_mapper[index]
qid = self.table["question_id"][index][question_index].as_py()
if self.split != "test":
answers = self.table["answers"][index][question_index].as_py()
labels = self.table["answer_labels"][index][question_index].as_py()
scores = self.table["answer_scores"][index][question_index].as_py()
else:
answers = list()
labels = list()
scores = list()
return {
"image": image_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
|
BridgeTower/src/datasets/vqav2_dataset.py/0
|
{
"file_path": "BridgeTower/src/datasets/vqav2_dataset.py",
"repo_id": "BridgeTower",
"token_count": 1065
}
| 172 |
# code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
img_np = np.array(img).astype(int)  # np.int was removed in NumPy >= 1.24
img_np = img_np + addition
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v): # [4, 8]
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.0:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.0))
y0 = int(max(0, y0 - v / 2.0))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def Identity(img, v):
return img
def augment_list(): # 16 operations and their ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
# l = [
# (Identity, 0., 1.0),
# (ShearX, 0., 0.3), # 0
# (ShearY, 0., 0.3), # 1
# (TranslateX, 0., 0.33), # 2
# (TranslateY, 0., 0.33), # 3
# (Rotate, 0, 30), # 4
# (AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
# (Equalize, 0, 1), # 7
# (Solarize, 0, 110), # 8
# (Posterize, 4, 8), # 9
# # (Contrast, 0.1, 1.9), # 10
# (Color, 0.1, 1.9), # 11
# (Brightness, 0.1, 1.9), # 12
# (Sharpness, 0.1, 1.9), # 13
# # (Cutout, 0, 0.2), # 14
# # (SamplePairing(imgs), 0, 0.4), # 15
# ]
# https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
l = [
(AutoContrast, 0, 1),
(Equalize, 0, 1),
# (Invert, 0, 1),
(Rotate, 0, 30),
(Posterize, 0, 4),
(Solarize, 0, 256),
(SolarizeAdd, 0, 110),
(Color, 0.1, 1.9),
(Contrast, 0.1, 1.9),
(Brightness, 0.1, 1.9),
(Sharpness, 0.1, 1.9),
(ShearX, 0.0, 0.3),
(ShearY, 0.0, 0.3),
# (CutoutAbs, 0, 40),
(TranslateXabs, 0.0, 100),
(TranslateYabs, 0.0, 100),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
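# Illustrative parameter sketch for Lighting (not from the original file):
# the values below are the widely used ImageNet PCA statistics; treat them
# as example numbers rather than a canonical part of this repo.
# _IMAGENET_PCA = {
#     "eigval": [0.2175, 0.0188, 0.0045],
#     "eigvec": [
#         [-0.5675, 0.7192, 0.4009],
#         [-0.5808, -0.0045, -0.8140],
#         [-0.5836, -0.6948, 0.4203],
#     ],
# }
# lighting = Lighting(0.1, _IMAGENET_PCA["eigval"], _IMAGENET_PCA["eigvec"])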
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
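# --- Illustrative usage sketch (not part of the original file) ---
# Prepend RandAugment to a torchvision pipeline; `n` ops are sampled per
# image and each magnitude is scaled by m/30 into the op's value range.
if __name__ == "__main__":
    from torchvision import transforms

    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ])
    transform.transforms.insert(0, RandAugment(n=2, m=9))
    img = Image.new("RGB", (300, 300))
    print(transform(img).size)  # PIL image size after augmentation and cropping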
|
BridgeTower/src/transforms/randaug.py/0
|
{
"file_path": "BridgeTower/src/transforms/randaug.py",
"repo_id": "BridgeTower",
"token_count": 3588
}
| 173 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import argparse
import os
from util import util
import torch
import models
import data
import pickle
class BaseOptions:
def __init__(self):
self.initialized = False
def initialize(self, parser):
# experiment specifics
parser.add_argument(
"--name",
type=str,
default="label2coco",
help="name of the experiment. It decides where to store samples and models",
)
parser.add_argument(
"--gpu_ids", type=str, default="0", help="gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU"
)
parser.add_argument(
"--checkpoints_dir", type=str, default="./checkpoints", help="models are saved here"
)
parser.add_argument("--model", type=str, default="pix2pix", help="which model to use")
parser.add_argument(
"--norm_G",
type=str,
default="spectralinstance",
help="instance normalization or batch normalization",
)
parser.add_argument(
"--norm_D",
type=str,
default="spectralinstance",
help="instance normalization or batch normalization",
)
parser.add_argument(
"--norm_E",
type=str,
default="spectralinstance",
help="instance normalization or batch normalization",
)
parser.add_argument("--phase", type=str, default="train", help="train, val, test, etc")
# input/output sizes
parser.add_argument("--batchSize", type=int, default=1, help="input batch size")
parser.add_argument(
"--preprocess_mode",
type=str,
default="scale_width_and_crop",
help="scaling and cropping of images at load time.",
choices=(
"resize_and_crop",
"crop",
"scale_width",
"scale_width_and_crop",
"scale_shortside",
"scale_shortside_and_crop",
"fixed",
"none",
"resize",
),
)
parser.add_argument(
"--load_size",
type=int,
default=1024,
help="Scale images to this size. The final image will be cropped to --crop_size.",
)
parser.add_argument(
"--crop_size",
type=int,
default=512,
help="Crop to the width of crop_size (after initially scaling the images to load_size.)",
)
parser.add_argument(
"--aspect_ratio",
type=float,
default=1.0,
help="The ratio width/height. The final height of the load image will be crop_size/aspect_ratio",
)
parser.add_argument(
"--label_nc",
type=int,
default=182,
help="# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dopntcare_label.",
)
parser.add_argument(
"--contain_dontcare_label",
action="store_true",
help="if the label map contains dontcare label (dontcare=255)",
)
parser.add_argument("--output_nc", type=int, default=3, help="# of output image channels")
# for setting inputs
parser.add_argument("--dataroot", type=str, default="./datasets/cityscapes/")
parser.add_argument("--dataset_mode", type=str, default="coco")
parser.add_argument(
"--serial_batches",
action="store_true",
help="if true, takes images in order to make batches, otherwise takes them randomly",
)
parser.add_argument(
"--no_flip",
action="store_true",
help="if specified, do not flip the images for data argumentation",
)
parser.add_argument("--nThreads", default=0, type=int, help="# threads for loading data")
parser.add_argument(
"--max_dataset_size",
type=int,
default=sys.maxsize,
help="Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.",
)
parser.add_argument(
"--load_from_opt_file",
action="store_true",
help="load the options from checkpoints and use that as default",
)
parser.add_argument(
"--cache_filelist_write",
action="store_true",
help="saves the current filelist into a text file, so that it loads faster",
)
parser.add_argument(
"--cache_filelist_read", action="store_true", help="reads from the file list cache"
)
# for displays
parser.add_argument("--display_winsize", type=int, default=400, help="display window size")
# for generator
parser.add_argument(
"--netG", type=str, default="spade", help="selects model to use for netG (pix2pixhd | spade)"
)
parser.add_argument("--ngf", type=int, default=64, help="# of gen filters in first conv layer")
parser.add_argument(
"--init_type",
type=str,
default="xavier",
help="network initialization [normal|xavier|kaiming|orthogonal]",
)
parser.add_argument(
"--init_variance", type=float, default=0.02, help="variance of the initialization distribution"
)
parser.add_argument("--z_dim", type=int, default=256, help="dimension of the latent z vector")
parser.add_argument(
"--no_parsing_map", action="store_true", help="During training, we do not use the parsing map"
)
# for instance-wise features
parser.add_argument(
"--no_instance", action="store_true", help="if specified, do *not* add instance map as input"
)
parser.add_argument(
"--nef", type=int, default=16, help="# of encoder filters in the first conv layer"
)
parser.add_argument("--use_vae", action="store_true", help="enable training with an image encoder.")
parser.add_argument(
"--tensorboard_log", action="store_true", help="use tensorboard to record the resutls"
)
# parser.add_argument('--img_dir',)
parser.add_argument(
"--old_face_folder", type=str, default="", help="The folder name of input old face"
)
parser.add_argument(
"--old_face_label_folder", type=str, default="", help="The folder name of input old face label"
)
parser.add_argument("--injection_layer", type=str, default="all", help="")
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, unknown = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
# modify dataset-related parser options
# dataset_mode = opt.dataset_mode
# dataset_option_setter = data.get_option_setter(dataset_mode)
# parser = dataset_option_setter(parser, self.isTrain)
opt, unknown = parser.parse_known_args()
# if there is opt_file, load it.
# The previous default options will be overwritten
if opt.load_from_opt_file:
parser = self.update_options_from_file(parser, opt)
opt = parser.parse_args()
self.parser = parser
return opt
def print_options(self, opt):
message = ""
message += "----------------- Options ---------------\n"
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = "\t[default: %s]" % str(default)
message += "{:>25}: {:<30}{}\n".format(str(k), str(v), comment)
message += "----------------- End -------------------"
# print(message)
def option_file_path(self, opt, makedir=False):
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
if makedir:
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, "opt")
return file_name
def save_options(self, opt):
file_name = self.option_file_path(opt, makedir=True)
with open(file_name + ".txt", "wt") as opt_file:
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = "\t[default: %s]" % str(default)
opt_file.write("{:>25}: {:<30}{}\n".format(str(k), str(v), comment))
with open(file_name + ".pkl", "wb") as opt_file:
pickle.dump(opt, opt_file)
def update_options_from_file(self, parser, opt):
new_opt = self.load_options(opt)
for k, v in sorted(vars(opt).items()):
if hasattr(new_opt, k) and v != getattr(new_opt, k):
new_val = getattr(new_opt, k)
parser.set_defaults(**{k: new_val})
return parser
def load_options(self, opt):
file_name = self.option_file_path(opt, makedir=False)
new_opt = pickle.load(open(file_name + ".pkl", "rb"))
return new_opt
def parse(self, save=False):
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
opt.contain_dontcare_label = False
self.print_options(opt)
if opt.isTrain:
self.save_options(opt)
# Set semantic_nc based on the option.
# This will be convenient in many places
opt.semantic_nc = (
opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if opt.no_instance else 1)
)
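        # e.g. label_nc=182, no dontcare label, instance map used -> semantic_nc = 182 + 0 + 1 = 183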
# set gpu ids
str_ids = opt.gpu_ids.split(",")
opt.gpu_ids = []
for str_id in str_ids:
int_id = int(str_id)
if int_id >= 0:
opt.gpu_ids.append(int_id)
if len(opt.gpu_ids) > 0:
print("The main GPU is ")
print(opt.gpu_ids[0])
torch.cuda.set_device(opt.gpu_ids[0])
assert (
len(opt.gpu_ids) == 0 or opt.batchSize % len(opt.gpu_ids) == 0
), "Batch size %d is wrong. It must be a multiple of # GPUs %d." % (opt.batchSize, len(opt.gpu_ids))
self.opt = opt
return self.opt
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/options/base_options.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/options/base_options.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 5057
}
| 174 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
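# --- Illustrative usage sketch (not part of the original file) ---
# Point `root` at any folder containing images; the path below is an assumption.
if __name__ == "__main__":
    import torchvision.transforms as T
    dataset = ImageFolder(root="./images", transform=T.ToTensor(), return_paths=True)
    img, path = dataset[0]
    print(path, tuple(img.shape))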
|
Bringing-Old-Photos-Back-to-Life/Global/data/image_folder.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/data/image_folder.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 763
}
| 175 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import os
from util import util
import torch
class BaseOptions:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
# experiment specifics
self.parser.add_argument(
"--name",
type=str,
default="label2city",
help="name of the experiment. It decides where to store samples and models",
)
self.parser.add_argument(
"--gpu_ids", type=str, default="0", help="gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU"
)
self.parser.add_argument(
"--checkpoints_dir", type=str, default="./checkpoints", help="models are saved here"
) ## note: to add this param when using philly
# self.parser.add_argument('--project_dir', type=str, default='./', help='the project is saved here') ################### This is necessary for philly
self.parser.add_argument(
"--outputs_dir", type=str, default="./outputs", help="models are saved here"
) ## note: to add this param when using philly Please end with '/'
self.parser.add_argument("--model", type=str, default="pix2pixHD", help="which model to use")
self.parser.add_argument(
"--norm", type=str, default="instance", help="instance normalization or batch normalization"
)
self.parser.add_argument("--use_dropout", action="store_true", help="use dropout for the generator")
self.parser.add_argument(
"--data_type",
default=32,
type=int,
choices=[8, 16, 32],
help="Supported data type i.e. 8, 16, 32 bit",
)
self.parser.add_argument("--verbose", action="store_true", default=False, help="toggles verbose")
# input/output sizes
self.parser.add_argument("--batchSize", type=int, default=1, help="input batch size")
self.parser.add_argument("--loadSize", type=int, default=1024, help="scale images to this size")
self.parser.add_argument("--fineSize", type=int, default=512, help="then crop to this size")
self.parser.add_argument("--label_nc", type=int, default=35, help="# of input label channels")
self.parser.add_argument("--input_nc", type=int, default=3, help="# of input image channels")
self.parser.add_argument("--output_nc", type=int, default=3, help="# of output image channels")
# for setting inputs
self.parser.add_argument("--dataroot", type=str, default="./datasets/cityscapes/")
self.parser.add_argument(
"--resize_or_crop",
type=str,
default="scale_width",
help="scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]",
)
self.parser.add_argument(
"--serial_batches",
action="store_true",
help="if true, takes images in order to make batches, otherwise takes them randomly",
)
self.parser.add_argument(
"--no_flip",
action="store_true",
help="if specified, do not flip the images for data argumentation",
)
self.parser.add_argument("--nThreads", default=2, type=int, help="# threads for loading data")
self.parser.add_argument(
"--max_dataset_size",
type=int,
default=float("inf"),
help="Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.",
)
# for displays
self.parser.add_argument("--display_winsize", type=int, default=512, help="display window size")
self.parser.add_argument(
"--tf_log",
action="store_true",
help="if specified, use tensorboard logging. Requires tensorflow installed",
)
# for generator
self.parser.add_argument("--netG", type=str, default="global", help="selects model to use for netG")
self.parser.add_argument("--ngf", type=int, default=64, help="# of gen filters in first conv layer")
self.parser.add_argument("--k_size", type=int, default=3, help="# kernel size conv layer")
self.parser.add_argument("--use_v2", action="store_true", help="use DCDCv2")
self.parser.add_argument("--mc", type=int, default=1024, help="# max channel")
self.parser.add_argument("--start_r", type=int, default=3, help="start layer to use resblock")
self.parser.add_argument(
"--n_downsample_global", type=int, default=4, help="number of downsampling layers in netG"
)
self.parser.add_argument(
"--n_blocks_global",
type=int,
default=9,
help="number of residual blocks in the global generator network",
)
self.parser.add_argument(
"--n_blocks_local",
type=int,
default=3,
help="number of residual blocks in the local enhancer network",
)
self.parser.add_argument(
"--n_local_enhancers", type=int, default=1, help="number of local enhancers to use"
)
self.parser.add_argument(
"--niter_fix_global",
type=int,
default=0,
help="number of epochs that we only train the outmost local enhancer",
)
self.parser.add_argument(
"--load_pretrain",
type=str,
default="",
help="load the pretrained model from the specified location",
)
# for instance-wise features
self.parser.add_argument(
"--no_instance", action="store_true", help="if specified, do *not* add instance map as input"
)
self.parser.add_argument(
"--instance_feat",
action="store_true",
help="if specified, add encoded instance features as input",
)
self.parser.add_argument(
"--label_feat", action="store_true", help="if specified, add encoded label features as input"
)
self.parser.add_argument("--feat_num", type=int, default=3, help="vector length for encoded features")
self.parser.add_argument(
"--load_features", action="store_true", help="if specified, load precomputed feature maps"
)
self.parser.add_argument(
"--n_downsample_E", type=int, default=4, help="# of downsampling layers in encoder"
)
self.parser.add_argument(
"--nef", type=int, default=16, help="# of encoder filters in the first conv layer"
)
self.parser.add_argument("--n_clusters", type=int, default=10, help="number of clusters for features")
# diy
self.parser.add_argument("--self_gen", action="store_true", help="self generate")
self.parser.add_argument(
"--mapping_n_block", type=int, default=3, help="number of resblock in mapping"
)
self.parser.add_argument("--map_mc", type=int, default=64, help="max channel of mapping")
self.parser.add_argument("--kl", type=float, default=0, help="KL Loss")
self.parser.add_argument(
"--load_pretrainA",
type=str,
default="",
help="load the pretrained model from the specified location",
)
self.parser.add_argument(
"--load_pretrainB",
type=str,
default="",
help="load the pretrained model from the specified location",
)
self.parser.add_argument("--feat_gan", action="store_true")
self.parser.add_argument("--no_cgan", action="store_true")
self.parser.add_argument("--map_unet", action="store_true")
self.parser.add_argument("--map_densenet", action="store_true")
self.parser.add_argument("--fcn", action="store_true")
self.parser.add_argument("--is_image", action="store_true", help="train image recon only pair data")
self.parser.add_argument("--label_unpair", action="store_true")
self.parser.add_argument("--mapping_unpair", action="store_true")
self.parser.add_argument("--unpair_w", type=float, default=1.0)
self.parser.add_argument("--pair_num", type=int, default=-1)
self.parser.add_argument("--Gan_w", type=float, default=1)
self.parser.add_argument("--feat_dim", type=int, default=-1)
self.parser.add_argument("--abalation_vae_len", type=int, default=-1)
######################### useless, just to cooperate with docker
self.parser.add_argument("--gpu", type=str)
self.parser.add_argument("--dataDir", type=str)
self.parser.add_argument("--modelDir", type=str)
self.parser.add_argument("--logDir", type=str)
self.parser.add_argument("--data_dir", type=str)
self.parser.add_argument("--use_skip_model", action="store_true")
self.parser.add_argument("--use_segmentation_model", action="store_true")
self.parser.add_argument("--spatio_size", type=int, default=64)
self.parser.add_argument("--test_random_crop", action="store_true")
##########################
self.parser.add_argument("--contain_scratch_L", action="store_true")
self.parser.add_argument(
"--mask_dilation", type=int, default=0
) ## Don't change the input, only dilate the mask
self.parser.add_argument(
"--irregular_mask", type=str, default="", help="This is the root of the mask"
)
self.parser.add_argument(
"--mapping_net_dilation",
type=int,
default=1,
help="This parameter is the dilation size of the translation net",
)
self.parser.add_argument(
"--VOC", type=str, default="VOC_RGB_JPEGImages.bigfile", help="The root of VOC dataset"
)
self.parser.add_argument("--non_local", type=str, default="", help="which non_local setting")
self.parser.add_argument(
"--NL_fusion_method",
type=str,
default="add",
help="how to fuse the origin feature and nl feature",
)
self.parser.add_argument(
"--NL_use_mask", action="store_true", help="If use mask while using Non-local mapping model"
)
self.parser.add_argument(
"--correlation_renormalize",
action="store_true",
help="Since after mask out the correlation matrix(which is softmaxed), the sum is not 1 any more, enable this param to re-weight",
)
self.parser.add_argument("--Smooth_L1", action="store_true", help="Use L1 Loss in image level")
self.parser.add_argument(
"--face_restore_setting", type=int, default=1, help="This is for the aligned face restoration"
)
self.parser.add_argument("--face_clean_url", type=str, default="")
self.parser.add_argument("--syn_input_url", type=str, default="")
self.parser.add_argument("--syn_gt_url", type=str, default="")
self.parser.add_argument(
"--test_on_synthetic",
action="store_true",
help="If you want to test on the synthetic data, enable this parameter",
)
self.parser.add_argument("--use_SN", action="store_true", help="Add SN to every parametric layer")
self.parser.add_argument(
"--use_two_stage_mapping", action="store_true", help="choose the model which uses two stage"
)
self.parser.add_argument("--L1_weight", type=float, default=10.0)
self.parser.add_argument("--softmax_temperature", type=float, default=1.0)
self.parser.add_argument(
"--patch_similarity",
action="store_true",
help="Enable this denotes using 3*3 patch to calculate similarity",
)
self.parser.add_argument(
"--use_self",
action="store_true",
help="Enable this denotes that while constructing the new feature maps, using original feature (diagonal == 1)",
)
self.parser.add_argument("--use_own_dataset", action="store_true")
self.parser.add_argument(
"--test_hole_two_folders",
action="store_true",
help="Enable this parameter means test the restoration with inpainting given twp folders which are mask and old respectively",
)
self.parser.add_argument(
"--no_hole",
action="store_true",
help="While test the full_model on non_scratch data, do not add random mask into the real old photos",
) ## Only for testing
self.parser.add_argument(
"--random_hole",
action="store_true",
help="While training the full model, 50% probability add hole",
)
self.parser.add_argument("--NL_res", action="store_true", help="NL+Resdual Block")
self.parser.add_argument("--image_L1", action="store_true", help="Image level loss: L1")
self.parser.add_argument(
"--hole_image_no_mask",
action="store_true",
help="while testing, give hole image but not give the mask",
)
self.parser.add_argument(
"--down_sample_degradation",
action="store_true",
help="down_sample the image only, corresponds to [down_sample_face]",
)
self.parser.add_argument(
"--norm_G", type=str, default="spectralinstance", help="The norm type of Generator"
)
self.parser.add_argument(
"--init_G",
type=str,
default="xavier",
help="normal|xavier|xavier_uniform|kaiming|orthogonal|none",
)
self.parser.add_argument("--use_new_G", action="store_true")
self.parser.add_argument("--use_new_D", action="store_true")
self.parser.add_argument(
"--only_voc", action="store_true", help="test the trianed celebA face model using VOC face"
)
self.parser.add_argument(
"--cosin_similarity",
action="store_true",
help="For non-local, using cosin to calculate the similarity",
)
self.parser.add_argument(
"--downsample_mode",
type=str,
default="nearest",
help="For partial non-local, choose how to downsample the mask",
)
self.parser.add_argument("--mapping_exp",type=int,default=0,help='Default 0: original PNL|1: Multi-Scale Patch Attention')
self.parser.add_argument("--inference_optimize",action='store_true',help='optimize the memory cost')
self.initialized = True
def parse(self, save=True):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
str_ids = self.opt.gpu_ids.split(",")
self.opt.gpu_ids = []
for str_id in str_ids:
int_id = int(str_id)
if int_id >= 0:
self.opt.gpu_ids.append(int_id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
# pass
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
# print('------------ Options -------------')
# for k, v in sorted(args.items()):
# print('%s: %s' % (str(k), str(v)))
# print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
if save and not self.opt.continue_train:
file_name = os.path.join(expr_dir, "opt.txt")
with open(file_name, "wt") as opt_file:
opt_file.write("------------ Options -------------\n")
for k, v in sorted(args.items()):
opt_file.write("%s: %s\n" % (str(k), str(v)))
opt_file.write("-------------- End ----------------\n")
return self.opt
|
Bringing-Old-Photos-Back-to-Life/Global/options/base_options.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/options/base_options.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 7077
}
| 176 |
#!/bin/sh
cd Face_Enhancement/models/networks
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm .
cd ../../../
cd Global/detection_models
git clone https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
cp -rf Synchronized-BatchNorm-PyTorch/sync_batchnorm .
cd ../../
# download the landmark detection model
cd Face_Detection/
wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
bzip2 -d shape_predictor_68_face_landmarks.dat.bz2
cd ../
# download the pretrained model
cd Face_Enhancement/
wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Face_Enhancement/checkpoints.zip
unzip checkpoints.zip
cd ../
cd Global/
wget https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Global/checkpoints.zip
unzip checkpoints.zip
cd ../
|
Bringing-Old-Photos-Back-to-Life/download-weights/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/download-weights",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 315
}
| 177 |
"""
This is an example using CLAP to perform zeroshot
classification on ESC50 (https://github.com/karolpiczak/ESC-50).
"""
from msclap import CLAP
from esc50_dataset import ESC50
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score
# Load dataset
root_path = "root_path" # Folder with ESC-50-master/
dataset = ESC50(root=root_path, download=True)  # If download=False, the code assumes base_folder='ESC-50-master' in esc50_dataset.py
prompt = 'this is the sound of '
y = [prompt + x for x in dataset.classes]
# Load and initialize CLAP
clap_model = CLAP(version = '2023', use_cuda=False)
# Computing text embeddings
text_embeddings = clap_model.get_text_embeddings(y)
# Computing audio embeddings
y_preds, y_labels = [], []
for i in tqdm(range(len(dataset))):
x, _, one_hot_target = dataset.__getitem__(i)
audio_embeddings = clap_model.get_audio_embeddings([x], resample=True)
similarity = clap_model.compute_similarity(audio_embeddings, text_embeddings)
y_pred = F.softmax(similarity.detach().cpu(), dim=1).numpy()
y_preds.append(y_pred)
y_labels.append(one_hot_target.detach().cpu().numpy())
y_labels, y_preds = np.concatenate(y_labels, axis=0), np.concatenate(y_preds, axis=0)
acc = accuracy_score(np.argmax(y_labels, axis=1), np.argmax(y_preds, axis=1))
print('ESC50 Accuracy {}'.format(acc))
"""
The output:
ESC50 Accuracy: 93.9%
"""
|
CLAP/examples/zero_shot_classification.py/0
|
{
"file_path": "CLAP/examples/zero_shot_classification.py",
"repo_id": "CLAP",
"token_count": 556
}
| 178 |
[tool.poetry]
name = "msclap"
version = "1.3.3"
description = "CLAP (Contrastive Language-Audio Pretraining) is a model that learns acoustic concepts from natural language supervision and enables “Zero-Shot” inference. The model has been extensively evaluated in 26 audio downstream tasks achieving SoTA in several of them including classification, retrieval, and captioning."
authors = ["Benjamin Elizalde", "Soham Deshmukh", "Huaming Wang"]
license = "MIT"
readme = "README.md"
packages = [
{ include = "msclap" },
]
[tool.poetry.dependencies]
python = "^3.8"
librosa = "^0.10.1"
numpy = "^1.23.0"
numba = "^0.58.0"
pandas = "^2.0.0"
torch = "^2.1.0"
torchaudio = "^2.1.0"
torchlibrosa = "^0.1.0"
torchvision = "^0.16.0"
tqdm = "^4.66.1"
transformers = "^4.34.0"
pyyaml = "^6.0.1"
scikit-learn = "^1.3.1"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
|
CLAP/pyproject.toml/0
|
{
"file_path": "CLAP/pyproject.toml",
"repo_id": "CLAP",
"token_count": 358
}
| 179 |
.. role:: hidden
:class: hidden-section
.. module:: fairseq.models
.. _Models:
Models
======
A Model defines the neural network's ``forward()`` method and encapsulates all
of the learnable parameters in the network. Each model also provides a set of
named *architectures* that define the precise network configuration (e.g.,
embedding dimension, number of layers, etc.).
Both the model type and architecture are selected via the ``--arch``
command-line argument. Once selected, a model may expose additional command-line
arguments for further configuration.
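For example, the following (illustrative) command selects a named Transformer
architecture; a real training run needs additional arguments (optimizer,
learning rate, etc.)::

    fairseq-train data-bin/iwslt14.tokenized.de-en --arch transformer_iwslt_de_en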
.. note::
All fairseq Models extend :class:`BaseFairseqModel`, which in turn extends
:class:`torch.nn.Module`. Thus any fairseq Model can be used as a
stand-alone Module in other PyTorch code.
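For instance, a model loaded through ``torch.hub`` can be manipulated like any
other Module (the hub model name below is illustrative)::

    import torch

    en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de')
    assert isinstance(en2de.models[0], torch.nn.Module)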
Convolutional Neural Networks (CNN)
-----------------------------------
.. module:: fairseq.models.fconv
.. autoclass:: fairseq.models.fconv.FConvModel
:members:
.. autoclass:: fairseq.models.fconv.FConvEncoder
:members:
:undoc-members:
.. autoclass:: fairseq.models.fconv.FConvDecoder
:members:
Long Short-Term Memory (LSTM) networks
--------------------------------------
.. module:: fairseq.models.lstm
.. autoclass:: fairseq.models.lstm.LSTMModel
:members:
.. autoclass:: fairseq.models.lstm.LSTMEncoder
:members:
.. autoclass:: fairseq.models.lstm.LSTMDecoder
:members:
Transformer (self-attention) networks
-------------------------------------
.. module:: fairseq.models.transformer
.. autoclass:: fairseq.models.transformer.TransformerModel
:members:
.. autoclass:: fairseq.models.transformer.TransformerEncoder
:members:
.. autoclass:: fairseq.models.transformer.TransformerEncoderLayer
:members:
.. autoclass:: fairseq.models.transformer.TransformerDecoder
:members:
.. autoclass:: fairseq.models.transformer.TransformerDecoderLayer
:members:
Adding new models
-----------------
.. currentmodule:: fairseq.models
.. autofunction:: fairseq.models.register_model
.. autofunction:: fairseq.models.register_model_architecture
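A minimal registration sketch (the names here are illustrative, not part of
fairseq itself)::

    from fairseq.models import (
        FairseqEncoderDecoderModel,
        register_model,
        register_model_architecture,
    )

    @register_model('my_toy_model')
    class MyToyModel(FairseqEncoderDecoderModel):

        @classmethod
        def build_model(cls, args, task):
            # construct and return cls(encoder, decoder) here
            ...

    @register_model_architecture('my_toy_model', 'my_toy_model_base')
    def my_toy_model_base(args):
        # set any unspecified hyperparameters to their defaults
        args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)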
.. autoclass:: fairseq.models.BaseFairseqModel
:members:
:undoc-members:
.. autoclass:: fairseq.models.FairseqEncoderDecoderModel
:members:
:undoc-members:
.. autoclass:: fairseq.models.FairseqEncoderModel
:members:
:undoc-members:
.. autoclass:: fairseq.models.FairseqLanguageModel
:members:
:undoc-members:
.. autoclass:: fairseq.models.FairseqMultiModel
:members:
:undoc-members:
.. autoclass:: fairseq.models.FairseqEncoder
:members:
.. autoclass:: fairseq.models.CompositeEncoder
:members:
.. autoclass:: fairseq.models.FairseqDecoder
:members:
.. _Incremental decoding:
Incremental decoding
--------------------
.. autoclass:: fairseq.models.FairseqIncrementalDecoder
:members:
:undoc-members:
|
COCO-LM/fairseq/docs/models.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/models.rst",
"repo_id": "COCO-LM",
"token_count": 913
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel
logger = logging.getLogger(__name__)
@dataclass
class AdaptiveSpanSmallConfig(FairseqDataclass):
# defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh
vocab_size: int = 50
d_model: int = 256
n_head: int = 4
d_inner: int = 1024
n_layer: int = 8
attn_span: int = 1024
dropout: float = 0.0
emb_dropout: float = 0.0
adapt_span_ramp: int = 32
adapt_span_init: float = 0.0
aux_loss_scaler: float = 0.000002
adapt_span_layer: bool = False
@register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig)
class AdaptiveSpanTransformer(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: AdaptiveSpanSmallConfig, task):
return cls(AdaptiveSpanDecoder(cfg, task))
def get_aux_loss(self):
return self.decoder.get_aux_loss()
def get_current_max_span(self):
return self.decoder.get_current_max_span()
def get_current_avg_span(self):
return self.decoder.get_current_avg_span()
class AdaptiveSpanDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
super().__init__(task.target_dictionary)
self.config = cfg
config = AdaptiveSpanSmallConfig(
vocab_size=len(task.target_dictionary),
d_model=cfg.d_model,
n_head=cfg.n_head,
d_inner=cfg.d_inner,
n_layer=cfg.n_layer,
attn_span=cfg.attn_span,
dropout=cfg.dropout,
emb_dropout=cfg.emb_dropout,
adapt_span_ramp=cfg.adapt_span_ramp,
adapt_span_init=cfg.adapt_span_init,
aux_loss_scaler=cfg.aux_loss_scaler,
adapt_span_layer=cfg.adapt_span_layer,
)
logger.info(config)
self.model = AdaptiveSpanTransformerModel(**config.__dict__)
self._mems = None
def forward(
self,
src_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
bsz = src_tokens.size(0)
if incremental_state is not None: # used during inference
mems = self.get_incremental_state("mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
if mems is None:
# first time init
mems = self.init_hid_cache(bsz)
output = self.model(x=src_tokens, h_cache=mems,)
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.config.attn_span
def init_hid_cache(self, batch_sz):
hid = []
for layer in self.model.layers:
param = next(self.model.parameters())
h = torch.zeros(
batch_sz,
layer.get_cache_size(),
self.config.d_model,
dtype=param.dtype,
device=param.device,
)
hid.append(h)
return hid
def get_aux_loss(self):
return self.model.get_aux_loss()
def get_current_max_span(self):
return self.model.get_current_max_span()
def get_current_avg_span(self):
return self.model.get_current_avg_span()
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
raise NotImplementedError("This is required for generation/beam search")
# mems = self.get_incremental_state(incremental_state, "mems")
# if mems is not None:
# new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
# self.set_incremental_state(incremental_state, "mems", new_mems)
|
COCO-LM/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py",
"repo_id": "COCO-LM",
"token_count": 2101
}
| 181 |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a pretrained language model trained on 138GB of French text based on RoBERTa.
Also available in [github.com/huggingface/transformers](https://github.com/huggingface/transformers/).
## Pre-trained models
| Model | #params | Download | Arch. | Training data |
|--------------------------------|---------|--------------------------------------------------------------------------------------------------------------------------|-------|-----------------------------------|
| `camembert` / `camembert-base` | 110M | [camembert-base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz) | Base | OSCAR (138 GB of text) |
| `camembert-large` | 335M | [camembert-large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz) | Large | CCNet (135 GB of text) |
| `camembert-base-ccnet` | 110M | [camembert-base-ccnet.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz) | Base | CCNet (135 GB of text) |
| `camembert-base-wikipedia-4gb` | 110M | [camembert-base-wikipedia-4gb.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz) | Base | Wikipedia (4 GB of text) |
| `camembert-base-oscar-4gb` | 110M | [camembert-base-oscar-4gb.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz) | Base | Subsample of OSCAR (4 GB of text) |
| `camembert-base-ccnet-4gb` | 110M | [camembert-base-ccnet-4gb.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz) | Base | Subsample of CCNet (4 GB of text) |
## Example usage
### fairseq
##### Load CamemBERT from torch.hub (PyTorch >= 1.1):
```python
import torch
camembert = torch.hub.load('pytorch/fairseq', 'camembert')
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Load CamemBERT (for PyTorch 1.0 or custom models):
```python
# Download camembert model
wget https://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz
tar -xzvf camembert-base.tar.gz
# Load the model in fairseq
from fairseq.models.roberta import CamembertModel
camembert = CamembertModel.from_pretrained('/path/to/camembert')
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks:
```python
masked_line = 'Le camembert est <mask> :)'
camembert.fill_mask(masked_line, topk=3)
# [('Le camembert est délicieux :)', 0.4909118115901947, ' délicieux'),
# ('Le camembert est excellent :)', 0.10556942224502563, ' excellent'),
# ('Le camembert est succulent :)', 0.03453322499990463, ' succulent')]
```
##### Extract features from Camembert:
```python
# Extract the last layer's features
line = "J'aime le camembert !"
tokens = camembert.encode(line)
last_layer_features = camembert.extract_features(tokens)
assert last_layer_features.size() == torch.Size([1, 10, 768])
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = camembert.extract_features(tokens, return_all_hiddens=True)
assert len(all_layers) == 13
assert torch.all(all_layers[-1] == last_layer_features)
```
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
|
COCO-LM/fairseq/examples/camembert/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/camembert/README.md",
"repo_id": "COCO-LM",
"token_count": 1625
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, List, Optional
from torch import Tensor
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerModel,
TransformerEncoder,
TransformerDecoder,
)
from fairseq.modules import (
TransformerDecoderLayer,
)
logger = logging.getLogger(__name__)
@register_model("laser_transformer")
class LaserTransformerModel(FairseqEncoderDecoderModel):
"""Train Transformer for LASER task
Requires --task laser
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens=None,
tgt_tokens=None,
tgt_lengths=None,
target_language_id=-1,
dataset_name="",
):
laser_encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(
prev_output_tokens, laser_encoder_out, lang_id=target_language_id
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--decoder-lang-embed-dim",
type=int,
metavar="N",
help="decoder language embedding dimension",
)
@classmethod
def build_model(cls, args, task):
base_laser_transformer_architecture(args)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
def load_embed_tokens(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
encoder_embed_tokens = load_embed_tokens(
task.source_dictionary, args.encoder_embed_dim
)
decoder_embed_tokens = load_embed_tokens(
task.target_dictionary, args.decoder_embed_dim
)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
encoder = LaserTransformerEncoder(
args, task.source_dictionary, encoder_embed_tokens
)
decoder = LaserTransformerDecoder(
args,
task.target_dictionary,
decoder_embed_tokens,
num_langs=num_langs,
lang_embed_dim=args.decoder_lang_embed_dim,
)
return cls(encoder, decoder)
class LaserTransformerEncoder(TransformerEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, src_tokens, *args, **kwargs):
encoder_out = super().forward(src_tokens, *args, **kwargs)
x = encoder_out["encoder_out"][0] # T x B x C
padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
if padding_mask.any():
x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)
# Build the sentence embedding by max-pooling over the encoder outputs
sentemb = x.max(dim=0)[0]
# The PyTorch Mobile lite interpreter does not support returning a NamedTuple
# from `forward`, so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {"sentemb": [sentemb]} # B x C
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Same as the one in transformer.py, with new_sentemb
"""
if len(encoder_out["sentemb"]) == 0:
new_sentemb = []
else:
new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)]
return {
"sentemb": new_sentemb, # B x C
}
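# Shape sketch (illustrative, not part of the original file): the encoder
# above turns padded states x of shape (T, B, C), with padded positions set
# to -inf, into one sentence embedding per batch element via max-pooling:
#
#     x: T x B x C  --max over T-->  sentemb: B x C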
class LaserTransformerDecoder(TransformerDecoder):
def __init__(self, args, dictionary, *kargs, **kwargs):
self.num_langs = kwargs.get("num_langs", 1)
self.lang_embed_dim = kwargs.get("lang_embed_dim", 0)
kwargs.pop("num_langs", None)
kwargs.pop("lang_embed_dim", None)
super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True)
if self.lang_embed_dim == 0:
self.embed_lang = None
else:
self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim)
nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1)
if self.output_projection is not None:
laser_output_embed_dim = (
self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
)
self.output_projection = nn.Linear(
laser_output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight,
mean=0,
std=laser_output_embed_dim ** -0.5,
)
def build_decoder_layer(self, args, no_encoder_attn=False):
decoder_embed_dim = args.decoder_embed_dim
args.decoder_embed_dim = (
decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim
)
res = TransformerDecoderLayer(args, no_encoder_attn=True)
args.decoder_embed_dim = decoder_embed_dim
return res
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
lang_id: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
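        # Concatenate the (optional) target-language embedding and the
        # LASER sentence embedding to every decoder time step along the
        # feature dimension, so each layer conditions on them directly.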
if self.embed_lang is not None:
lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id)
langemb = self.embed_lang(lang_ids)
langemb = langemb.unsqueeze(0)
repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * (
len(langemb.shape) - 1
)
x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1)
sentemb = encoder_out["sentemb"][0]
sentemb = sentemb.unsqueeze(0)
repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1)
x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
None,
None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
lang_id: Optional[int] = None,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
assert lang_id is not None
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
lang_id=lang_id,
)
if not features_only:
x = self.output_layer(x)
return x, extra
@register_model_architecture("laser_transformer", "laser_transformer")
def base_laser_transformer_architecture(args):
base_architecture(args)
args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
|
COCO-LM/fairseq/examples/laser/laser_src/laser_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/laser/laser_src/laser_transformer.py",
"repo_id": "COCO-LM",
"token_count": 5621
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Linformer: Self-Attention with Linear Complexity
"""
import logging
import torch
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
init_bert_params,
roberta_base_architecture,
roberta_large_architecture,
RobertaEncoder,
RobertaModel,
)
from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder
logger = logging.getLogger(__name__)
@register_model("linformer_roberta")
class LinformerModel(RobertaModel):
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
# add args for Linformer
parser.add_argument(
"--compressed", type=int, help="compressed ratio of sequence length"
)
parser.add_argument(
"--shared-kv-compressed",
type=int,
help="share compressed matrix between k and v, in each layer",
)
parser.add_argument(
"--shared-layer-kv-compressed",
type=int,
help="share compressed matrix between k and v and across all layers",
)
parser.add_argument(
"--freeze-compress",
type=int,
help="freeze the parameters in compressed layer",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
if not hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
encoder = LinformerEncoder(args, task.source_dictionary)
return cls(args, encoder)
class LinformerEncoder(RobertaEncoder):
"""Linformer encoder."""
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.register_buffer("version", torch.tensor(2))
def build_encoder(self, args, dictionary, embed_tokens):
encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
# some old checkpoints had weight sharing implemented incorrectly
# (note: this was correct in the original paper code)
if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
state_dict[f"{prefix}version"] = torch.tensor(1)
# check if input embeddings and output embeddings were tied
if not torch.allclose(
state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"],
state_dict[f"{prefix}lm_head.weight"],
):
# they weren't tied, re-init the LM head without weight sharing
self.lm_head = self.build_lm_head(
embed_dim=self.args.encoder_embed_dim,
output_dim=len(self.dictionary),
activation_fn=self.args.activation_fn,
weight=None, # don't share weights
)
@register_model_architecture("linformer_roberta", "linformer_roberta")
def base_architecture(args):
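    # Linformer defaults: project the length-n attention span down to
    # n / compressed; the K/V projections are not shared by default and
    # remain trainable (freeze_compress=0).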
args.compressed = getattr(args, "compressed", 4)
args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0)
args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0)
args.freeze_compress = getattr(args, "freeze_compress", 0)
roberta_base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_base")
def linformer_roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_large")
def linformer_roberta_large_architecture(args):
roberta_large_architecture(args)
base_architecture(args)
|
COCO-LM/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/linformer/linformer_src/models/linformer_roberta.py",
"repo_id": "COCO-LM",
"token_count": 1705
}
| 184 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pythainlp import word_tokenize
for line in sys.stdin:
print(" ".join(word_tokenize(line.strip())))
|
COCO-LM/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/tokenizers/tokenize_thai.py",
"repo_id": "COCO-LM",
"token_count": 99
}
| 185 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
if [ -z "$WORKDIR_ROOT" ] ;
then
        echo "Please specify your working directory root in the environment variable WORKDIR_ROOT. Exiting..."
        exit 1
fi
set -e
set -o pipefail
SRC=en
SI_TGT=si
NE_TGT=ne
DESTDIR=${WORKDIR_ROOT}/ML50/raw/
ROOT=${WORKDIR_ROOT}/tmp
mkdir -p $ROOT
DATA=$ROOT/data
NE_ROOT=$DATA/all-clean-ne
SI_ROOT=$DATA/all-clean-si
mkdir -p $DATA $NE_ROOT $SI_ROOT
SI_OPUS_DATASETS=(
"$SI_ROOT/GNOME.en-si"
"$SI_ROOT/Ubuntu.en-si"
"$SI_ROOT/KDE4.en-si"
"$SI_ROOT/OpenSubtitles.en-si"
)
SI_OPUS_URLS=(
"https://object.pouta.csc.fi/OPUS-GNOME/v1/moses/en-si.txt.zip"
"https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/en-si.txt.zip"
"https://object.pouta.csc.fi/OPUS-KDE4/v2/moses/en-si.txt.zip"
"https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/moses/en-si.txt.zip"
)
NE_OPUS_DATASETS=(
"$NE_ROOT/GNOME.en-ne"
"$NE_ROOT/Ubuntu.en-ne"
"$NE_ROOT/KDE4.en-ne"
)
NE_OPUS_URLS=(
"https://object.pouta.csc.fi/OPUS-GNOME/v1/moses/en-ne.txt.zip"
"https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/en-ne.txt.zip"
"https://object.pouta.csc.fi/OPUS-KDE4/v2/moses/en-ne.txt.zip"
)
REMOVE_FILE_PATHS=()
# Download data
download_data() {
CORPORA=$1
URL=$2
if [ -f $CORPORA ]; then
echo "$CORPORA already exists, skipping download"
else
echo "Downloading $URL"
wget $URL -O $CORPORA --no-check-certificate || rm -f $CORPORA
if [ -f $CORPORA ]; then
echo "$URL successfully downloaded."
else
echo "$URL not successfully downloaded."
rm -f $CORPORA
            exit 1
fi
fi
}
# Example: download_opus_data $LANG_ROOT $TGT
download_opus_data() {
LANG_ROOT=$1
TGT=$2
if [ "$TGT" = "si" ]; then
URLS=("${SI_OPUS_URLS[@]}")
DATASETS=("${SI_OPUS_DATASETS[@]}")
else
URLS=("${NE_OPUS_URLS[@]}")
DATASETS=("${NE_OPUS_DATASETS[@]}")
fi
# Download and extract data
for ((i=0;i<${#URLS[@]};++i)); do
URL=${URLS[i]}
CORPORA=${DATASETS[i]}
download_data $CORPORA $URL
unzip -o $CORPORA -d $LANG_ROOT
REMOVE_FILE_PATHS+=( $CORPORA $CORPORA.xml $CORPORA.ids $LANG_ROOT/README $LANG_ROOT/LICENSE )
done
cat ${DATASETS[0]}.$SRC ${DATASETS[1]}.$SRC ${DATASETS[2]}.$SRC > $LANG_ROOT/GNOMEKDEUbuntu.$SRC-$TGT.$SRC
cat ${DATASETS[0]}.$TGT ${DATASETS[1]}.$TGT ${DATASETS[2]}.$TGT > $LANG_ROOT/GNOMEKDEUbuntu.$SRC-$TGT.$TGT
REMOVE_FILE_PATHS+=( ${DATASETS[0]}.$SRC ${DATASETS[1]}.$SRC ${DATASETS[2]}.$SRC )
REMOVE_FILE_PATHS+=( ${DATASETS[0]}.$TGT ${DATASETS[1]}.$TGT ${DATASETS[2]}.$TGT )
}
download_opus_data $SI_ROOT $SI_TGT
cp ${SI_OPUS_DATASETS[3]}.$SRC $SI_ROOT/OpenSubtitles2018.$SRC-$SI_TGT.$SRC
cp ${SI_OPUS_DATASETS[3]}.$SI_TGT $SI_ROOT/OpenSubtitles2018.$SRC-$SI_TGT.$SI_TGT
REMOVE_FILE_PATHS+=( ${SI_OPUS_DATASETS[3]}.$SRC ${SI_OPUS_DATASETS[3]}.$SI_TGT )
download_opus_data $NE_ROOT $NE_TGT
# Download and extract Global Voices data
GLOBAL_VOICES="$NE_ROOT/globalvoices.2018q4.ne-en"
GLOBAL_VOICES_URL="http://www.casmacat.eu/corpus/global-voices/globalvoices.ne-en.xliff.gz"
download_data $GLOBAL_VOICES.gz $GLOBAL_VOICES_URL
gunzip -Nf $GLOBAL_VOICES.gz
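# Extract the <source> (Nepali) and <target> (English) segments from the
# XLIFF markup.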
sed -ne 's?.*<source>\(.*\)</source>.*?\1?p' $GLOBAL_VOICES > $GLOBAL_VOICES.$NE_TGT
sed -ne 's?.*<target[^>]*>\(.*\)</target>.*?\1?p' $GLOBAL_VOICES > $GLOBAL_VOICES.$SRC
REMOVE_FILE_PATHS+=( $GLOBAL_VOICES )
# Download and extract the bible dataset
BIBLE_TOOLS=bible-corpus-tools
XML_BIBLES=XML_Bibles
XML_BIBLES_DUP=XML_Bibles_dup
if [ ! -e $BIBLE_TOOLS ]; then
echo "Cloning bible-corpus-tools repository..."
git clone https://github.com/christos-c/bible-corpus-tools.git
fi
mkdir -p $BIBLE_TOOLS/bin $XML_BIBLES $XML_BIBLES_DUP
javac -cp "$BIBLE_TOOLS/lib/*" -d $BIBLE_TOOLS/bin $BIBLE_TOOLS/src/bible/readers/*.java $BIBLE_TOOLS/src/bible/*.java
download_data bible.tar.gz "https://github.com/christos-c/bible-corpus/archive/v1.2.1.tar.gz"
tar xvzf bible.tar.gz
cp bible-corpus-1.2.1/bibles/{Greek.xml,English.xml,Nepali.xml} $XML_BIBLES/
cp bible-corpus-1.2.1/bibles/{Greek.xml,English-WEB.xml,Nepali.xml} $XML_BIBLES_DUP/
java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateMLBooks $XML_BIBLES
java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateMLBooks $XML_BIBLES_DUP
java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateVerseAlignedBooks $XML_BIBLES
java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateVerseAlignedBooks $XML_BIBLES_DUP
cat $XML_BIBLES/aligned/*/English.txt > $NE_ROOT/bible.$SRC-$NE_TGT.$SRC
cat $XML_BIBLES/aligned/*/Nepali.txt > $NE_ROOT/bible.$SRC-$NE_TGT.$NE_TGT
cat $XML_BIBLES_DUP/aligned/*/English-WEB.txt > $NE_ROOT/bible_dup.$SRC-$NE_TGT.$SRC
cat $XML_BIBLES_DUP/aligned/*/Nepali.txt > $NE_ROOT/bible_dup.$SRC-$NE_TGT.$NE_TGT
REMOVE_FILE_PATHS+=( bible-corpus-1.2.1 bible.tar.gz $BIBLE_TOOLS $XML_BIBLES $XML_BIBLES_DUP )
# Download and extract the Penn Treebank dataset
NE_TAGGED=$ROOT/new_submissions_parallel_corpus_project_Nepal
NE_TAGGED_URL="http://www.cle.org.pk/Downloads/ling_resources/parallelcorpus/NepaliTaggedCorpus.zip"
EN_TAGGED_PATCH_URL="https://dl.fbaipublicfiles.com/fairseq/data/nepali-penn-treebank.en.patch"
NE_TAGGED_PATCH_URL="https://dl.fbaipublicfiles.com/fairseq/data/nepali-penn-treebank.ne.patch"
MOSES=mosesdecoder
MOSES_TOK=$MOSES/scripts/tokenizer
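# Perl substitution programs that undo Penn Treebank escaping
# (-LRB-/-RRB- brackets, `` '' quotes, etc.) before tokenization.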
EN_PATCH_REGEX="{s:\\\/:\/:g;s/\*\T\*\-\n+//g;s/\-LCB\-/\{/g;s/\-RCB\-/\}/g; s/\-LSB\-/\[/g; s/\-RSB\-/\]/g;s/\-LRB\-/\(/g; s/\-RRB\-/\)/g; s/\'\'/\"/g; s/\`\`/\"/g; s/\ +\'s\ +/\'s /g; s/\ +\'re\ +/\'re /g; s/\"\ +/\"/g; s/\ +\"/\"/g; s/\ n't([\ \.\"])/n't\1/g; s/\r+(.)/\1/g;}"
NE_PATCH_REGEX="{s:\p{Cf}::g;s:\\\/:\/:g;s/\*\T\*\-\n+//g;s/\-LCB\-/\{/g;s/\-RCB\-/\}/g; s/\-LSB\-/\[/g; s/\-RSB\-/\]/g;s/\-LRB\-/\(/g; s/\-RRB\-/\)/g; s/\'\'/\"/g; s/\`\`/\"/g; s/\ +\'s\ +/\'s /g; s/\ +\'re\ +/\'re /g; s/\"\ +/\"/g; s/\ +\"/\"/g; s/\ n't([\ \.\"])/n't\1/g; s/\r+(.)/\1/g;}"
download_data $DATA/nepali-penn-treebank.$SRC.patch $EN_TAGGED_PATCH_URL
download_data $DATA/nepali-penn-treebank.$NE_TGT.patch $NE_TAGGED_PATCH_URL
download_data original.zip $NE_TAGGED_URL
unzip -o original.zip -d $ROOT
cat $NE_TAGGED/00.txt $NE_TAGGED/01.txt $NE_TAGGED/02.txt > $NE_TAGGED/nepali-penn-treebank.$SRC
cat $NE_TAGGED/00ne_revised.txt $NE_TAGGED/01ne_revised.txt $NE_TAGGED/02ne_revised.txt > $NE_TAGGED/nepali-penn-treebank.$NE_TGT
patch $NE_TAGGED/nepali-penn-treebank.$SRC -i $DATA/nepali-penn-treebank.$SRC.patch -o $NE_TAGGED/nepali-penn-treebank-patched.$SRC
patch $NE_TAGGED/nepali-penn-treebank.$NE_TGT -i $DATA/nepali-penn-treebank.$NE_TGT.patch -o $NE_TAGGED/nepali-penn-treebank-patched.$NE_TGT
if [ ! -e $MOSES ]; then
echo "Cloning moses repository..."
git clone https://github.com/moses-smt/mosesdecoder.git
fi
cat $NE_TAGGED/nepali-penn-treebank-patched.$SRC | \
perl -anpe "$EN_PATCH_REGEX" | \
$MOSES_TOK/tokenizer.perl -l $SRC | \
$MOSES_TOK/detokenizer.perl -l $SRC > $NE_ROOT/nepali-penn-treebank.$SRC
cat $NE_TAGGED/nepali-penn-treebank-patched.$NE_TGT | \
perl -CIO -anpe "$NE_PATCH_REGEX" | \
$MOSES_TOK/detokenizer.perl -l $SRC > $NE_ROOT/nepali-penn-treebank.$NE_TGT
# Download nepali dictionary data
NE_DICT=$NE_ROOT/dictionaries
download_data $NE_DICT "http://www.seas.upenn.edu/~nlp/resources/TACL-data-release/dictionaries.tar.gz"
tar xvzf $NE_DICT
cp dictionaries/dict.ne $NE_ROOT/dictionary.$NE_TGT-$SRC
REMOVE_FILE_PATHS+=( $NE_DICT dictionaries )
REMOVE_FILE_PATHS+=( $MOSES $NE_TAGGED original.zip $DATA/nepali-penn-treebank.$SRC.patch $DATA/nepali-penn-treebank.$NE_TGT.patch )
# Remove the temporary files
for ((i=0;i<${#REMOVE_FILE_PATHS[@]};++i)); do
rm -rf ${REMOVE_FILE_PATHS[i]}
done
# Copy the training data
si=si_LK
ne=ne_NP
en=en_XX
cat $SI_ROOT/GNOMEKDEUbuntu.en-si.si $SI_ROOT/OpenSubtitles2018.en-si.si > $DESTDIR/train.$si-$en.$si
cat $SI_ROOT/GNOMEKDEUbuntu.en-si.en $SI_ROOT/OpenSubtitles2018.en-si.en > $DESTDIR/train.$si-$en.$en
cat $NE_ROOT/bible_dup.en-ne.ne $NE_ROOT/bible.en-ne.ne $NE_ROOT/globalvoices.2018q4.ne-en.ne $NE_ROOT/GNOMEKDEUbuntu.en-ne.ne $NE_ROOT/nepali-penn-treebank.ne > $DESTDIR/train.$ne-$en.$ne
cat $NE_ROOT/bible_dup.en-ne.en $NE_ROOT/bible.en-ne.en $NE_ROOT/globalvoices.2018q4.ne-en.en $NE_ROOT/GNOMEKDEUbuntu.en-ne.en $NE_ROOT/nepali-penn-treebank.en > $DESTDIR/train.$ne-$en.$en
# Download the test sets
wget https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz
tar -xvzf wikipedia_en_ne_si_test_sets.tgz
cp wikipedia_en_ne_si_test_sets/wikipedia.dev.ne-en.ne $DESTDIR/valid.$ne-$en.$ne
cp wikipedia_en_ne_si_test_sets/wikipedia.dev.ne-en.en $DESTDIR/valid.$ne-$en.$en
cp wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.si $DESTDIR/valid.$si-$en.$si
cp wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.en $DESTDIR/valid.$si-$en.$en
cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.ne-en.ne $DESTDIR/devtest.$ne-$en.$ne
cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.ne-en.en $DESTDIR/devtest.$ne-$en.$en
cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.si $DESTDIR/devtest.$si-$en.$si
cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.en $DESTDIR/devtest.$si-$en.$en
cp wikipedia_en_ne_si_test_sets/wikipedia.test.ne-en.ne $DESTDIR/test.$ne-$en.$ne
cp wikipedia_en_ne_si_test_sets/wikipedia.test.ne-en.en $DESTDIR/test.$ne-$en.$en
cp wikipedia_en_ne_si_test_sets/wikipedia.test.si-en.si $DESTDIR/test.$si-$en.$si
cp wikipedia_en_ne_si_test_sets/wikipedia.test.si-en.en $DESTDIR/test.$si-$en.$en
rm -rf wikipedia_en_ne_si_test_sets.tgz wikipedia_en_ne_si_test_sets
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_flores_data.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_flores_data.sh",
"repo_id": "COCO-LM",
"token_count": 4807
}
| 186 |
## Training a pointer-generator model on the Extreme Summarization dataset
##### 1. Download the Extreme Summarization data and preprocess it
Follow the instructions [here](https://github.com/EdinburghNLP/XSum) to obtain
the original Extreme Summarization dataset. You should have six files,
{train,validation,test}.{document,summary}.
##### 2. Create a vocabulary and extend it with source position markers
```bash
vocab_size=10000
position_markers=1000
export LC_ALL=C
cat train.document train.summary |
tr -s '[:space:]' '\n' |
sort |
uniq -c |
sort -k1,1bnr -k2 |
head -n "$((vocab_size - 4))" |
awk '{ print $2 " " $1 }' >dict.pg.txt
python3 -c "[print('<unk-{}> 0'.format(n)) for n in range($position_markers)]" >>dict.pg.txt
```
This creates the file dict.pg.txt that contains the 10k most frequent words,
followed by 1k source position markers:
```
the 4954867
. 4157552
, 3439668
to 2212159
a 1916857
of 1916820
and 1823350
...
<unk-0> 0
<unk-1> 0
<unk-2> 0
<unk-3> 0
<unk-4> 0
...
```
##### 3. Preprocess the text data
```bash
./preprocess.py --source train.document --target train.summary --vocab <(cut -d' ' -f1 dict.pg.txt) --source-out train.pg.src --target-out train.pg.tgt
./preprocess.py --source validation.document --target validation.summary --vocab <(cut -d' ' -f1 dict.pg.txt) --source-out valid.pg.src --target-out valid.pg.tgt
./preprocess.py --source test.document --vocab <(cut -d' ' -f1 dict.pg.txt) --source-out test.pg.src
```
The data should now contain `<unk-N>` tokens in place of out-of-vocabulary words.
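For intuition, a minimal sketch of the idea behind this step (the `replace_oov` helper is hypothetical; the actual logic lives in `preprocess.py`): an out-of-vocabulary word at source position N becomes the marker `<unk-N>`, which the pointer mechanism can later copy back.
```python
def replace_oov(tokens, vocab, max_markers=1000):
    # Hypothetical sketch: in-vocabulary tokens pass through; an OOV
    # token at position n (n < max_markers) becomes the positional
    # marker <unk-n>; any later OOV falls back to plain <unk>.
    out = []
    for n, token in enumerate(tokens):
        if token in vocab:
            out.append(token)
        elif n < max_markers:
            out.append("<unk-{}>".format(n))
        else:
            out.append("<unk>")
    return out
```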
##### 4. Binarize the dataset
```bash
fairseq-preprocess \
--source-lang src \
--target-lang tgt \
--trainpref train.pg \
--validpref valid.pg \
--destdir bin \
--workers 60 \
--srcdict dict.pg.txt \
--joined-dictionary
```
##### 5. Train a model
```bash
total_updates=20000
warmup_updates=500
lr=0.001
max_tokens=4096
update_freq=4
pointer_layer=-2
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train bin \
--user-dir examples/pointer_generator/pointer_generator_src \
--max-tokens "$max_tokens" \
--task translation \
--source-lang src --target-lang tgt \
--truncate-source \
--layernorm-embedding \
--share-all-embeddings \
--encoder-normalize-before \
--decoder-normalize-before \
--required-batch-size-multiple 1 \
--arch transformer_pointer_generator \
--alignment-layer "$pointer_layer" \
--alignment-heads 1 \
--source-position-markers 1000 \
--criterion label_smoothed_cross_entropy \
--label-smoothing 0.1 \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.01 --optimizer adam --adam-betas "(0.9, 0.999)" --adam-eps 1e-08 \
--clip-norm 0.1 \
--lr-scheduler inverse_sqrt --lr "$lr" --max-update "$total_updates" --warmup-updates "$warmup_updates" \
--update-freq "$update_freq" \
--skip-invalid-size-inputs-valid-test
```
Above we specify that our dictionary contains 1000 source position markers, and
that we want to use one attention head from the penultimate decoder layer for
pointing. It should run in 5.5 hours on one node with eight 32GB V100 GPUs. The
logged messages confirm that dictionary indices at or above 10000 will be mapped to
the `<unk>` embedding:
```
2020-09-24 20:43:53 | INFO | fairseq.tasks.translation | [src] dictionary: 11000 types
2020-09-24 20:43:53 | INFO | fairseq.tasks.translation | [tgt] dictionary: 11000 types
2020-09-24 20:43:53 | INFO | fairseq.data.data_utils | loaded 11332 examples from: bin/valid.src-tgt.src
2020-09-24 20:43:53 | INFO | fairseq.data.data_utils | loaded 11332 examples from: bin/valid.src-tgt.tgt
2020-09-24 20:43:53 | INFO | fairseq.tasks.translation | bin valid src-tgt 11332 examples
2020-09-24 20:43:53 | INFO | fairseq.models.transformer_pg | dictionary indices from 10000 to 10999 will be mapped to 3
```
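Conceptually the mapping is just a clamp on the embedding lookup indices; the sketch below is illustrative only (`map_markers_to_unk` is a made-up name, not the model's actual code):
```python
import torch

def map_markers_to_unk(indices, vocab_size=10000, unk_index=3):
    # Hypothetical sketch: indices that refer to source position
    # markers (>= vocab_size) are embedded as plain <unk>.
    return torch.where(
        indices >= vocab_size, torch.full_like(indices, unk_index), indices
    )
```
Even though all markers share the `<unk>` input embedding, the pointer mechanism can still emit a specific `<unk-N>` by copying it from the source.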
##### 6. Summarize the test sequences
```bash
batch_size=32
beam_size=6
max_length=60
length_penalty=1.0
fairseq-interactive bin \
--user-dir examples/pointer_generator/pointer_generator_src \
--batch-size "$batch_size" \
--task translation \
--source-lang src --target-lang tgt \
--path checkpoints/checkpoint_last.pt \
--input test.pg.src \
--buffer-size 200 \
--max-len-a 0 \
--max-len-b "$max_length" \
--lenpen "$length_penalty" \
--beam "$beam_size" \
--skip-invalid-size-inputs-valid-test |
tee generate.out
grep ^H generate.out | cut -f 3- >generate.hyp
```
Now you should have the generated sequences in `generate.hyp`. They contain
`<unk-N>` tokens that the model has copied from the source sequence. In order to
retrieve the original words, we need the unprocessed source sequences from
`test.document`.
##### 7. Process the generated output
Since we skipped over-long inputs when producing `generate.hyp`, we also have to
skip the same over-long sequences when reading `test.document`.
```bash
./postprocess.py \
--source <(awk 'NF<1024' test.document) \
--target generate.hyp \
--target-out generate.hyp.processed
```
Now you'll find the final sequences from `generate.hyp.processed`, with
`<unk-N>` replaced with the original word from the source sequence.
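Conceptually the replacement is a positional lookup into the original source; a simplified sketch of the idea (the real logic is in `postprocess.py`):
```python
import re

def restore_markers(hypothesis, source):
    # Replace each <unk-N> with the word at position N of the original
    # source line; leave the marker untouched if N is out of range.
    src_tokens = source.split()

    def lookup(match):
        n = int(match.group(1))
        return src_tokens[n] if n < len(src_tokens) else match.group(0)

    return re.sub(r"<unk-(\d+)>", lookup, hypothesis)
```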
##### An example of a summarized sequence
The original source document in `test.document`:
> de roon moved to teesside in june 2016 for an initial # 8.8 m fee and played 33 premier league games last term . the netherlands international , 26 , scored five goals in 36 league and cup games during his spell at boro . meanwhile , manager garry monk confirmed the championship club 's interest in signing chelsea midfielder lewis baker . `` he 's a target and one of many that we 've had throughout the summer months , '' said monk . find all the latest football transfers on our dedicated page .
The preprocessed source document in `test.pg.src`:
> de \<unk-1> moved to \<unk-4> in june 2016 for an initial # \<unk-12> m fee and played 33 premier league games last term . the netherlands international , 26 , scored five goals in 36 league and cup games during his spell at boro . meanwhile , manager garry monk confirmed the championship club 's interest in signing chelsea midfielder lewis baker . `` he 's a target and one of many that we 've had throughout the summer months , '' said monk . find all the latest football transfers on our dedicated page .
The generated summary in `generate.hyp`:
> middlesbrough striker \<unk> de \<unk-1> has joined spanish side \<unk> on a season-long loan .
The generated summary after postprocessing in `generate.hyp.processed`:
> middlesbrough striker \<unk> de roon has joined spanish side \<unk> on a season-long loan .
|
COCO-LM/fairseq/examples/pointer_generator/README.xsum.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/pointer_generator/README.xsum.md",
"repo_id": "COCO-LM",
"token_count": 2249
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
transformer_vaswani_wmt_en_fr_big,
)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("transformer_unidirectional")
class TransformerUnidirectionalModel(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerMonotonicEncoder(args, src_dict, embed_tokens)
@register_model("transformer_monotonic")
class TransformerModelSimulTrans(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerMonotonicEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
def _indices_from_states(self, states):
if type(states["indices"]["src"]) == list:
if next(self.parameters()).is_cuda:
tensor = torch.cuda.LongTensor
else:
tensor = torch.LongTensor
src_indices = tensor(
[states["indices"]["src"][: 1 + states["steps"]["src"]]]
)
tgt_indices = tensor(
[[self.decoder.dictionary.eos()] + states["indices"]["tgt"]]
)
else:
src_indices = states["indices"]["src"][: 1 + states["steps"]["src"]]
tgt_indices = states["indices"]["tgt"]
return src_indices, None, tgt_indices
class TransformerMonotonicEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.dictionary = dictionary
self.layers = nn.ModuleList([])
self.layers.extend(
[TransformerMonotonicEncoderLayer(args) for i in range(args.encoder_layers)]
)
class TransformerMonotonicDecoder(TransformerDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False)
self.dictionary = dictionary
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerMonotonicDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
def pre_attention(
self, prev_output_tokens, encoder_out_dict, incremental_state=None
):
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_out = encoder_out_dict["encoder_out"][0]
encoder_padding_mask = (
encoder_out_dict["encoder_padding_mask"][0]
if len(encoder_out_dict["encoder_padding_mask"]) > 0
else None
)
return x, encoder_out, encoder_padding_mask
def post_attention(self, x):
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x
def clear_cache(self, incremental_state, end_id=None):
"""
        Clear the cache in the monotonic layers.
        The cache is created by a decoder forward pass that ends up making
        no prediction (the model decides to read instead).
        end_id is the index of the last layer to clear.
"""
if end_id is None:
end_id = len(self.layers)
for j in range(end_id):
self.layers[j].prune_incremental_state(incremental_state)
def extract_features(
self, prev_output_tokens, encoder_out, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# incremental_state = None
(x, encoder_outs, encoder_padding_mask) = self.pre_attention(
prev_output_tokens, encoder_out, incremental_state
)
attn = None
inner_states = [x]
attn_list = []
step_list = []
for i, layer in enumerate(self.layers):
x, attn, _ = layer(
x=x,
encoder_out=encoder_outs,
encoder_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
attn_list.append(attn)
if incremental_state is not None:
curr_steps = layer.get_head_steps(incremental_state)
step_list.append(curr_steps)
if incremental_state.get("online", True):
# Online indicates that the encoder states are still changing
p_choose = (
attn["p_choose"]
.squeeze(0)
.squeeze(1)
.gather(1, curr_steps.t())
)
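                    # p_choose < 0.5 means this head chooses to read
                    # another source token before writing, so its step
                    # pointer advances by one.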
new_steps = curr_steps + (p_choose < 0.5).t().type_as(curr_steps)
if (new_steps >= incremental_state["steps"]["src"]).any():
                        # We need to prune the last self_attn saved_state
                        # if the model decides not to read;
                        # otherwise there will be a duplicated saved_state
self.clear_cache(incremental_state, i + 1)
return x, {"action": 0}
x = self.post_attention(x)
return x, {
"action": 1,
"attn_list": attn_list,
"step_list": step_list,
"encoder_out": encoder_out,
"encoder_padding_mask": encoder_padding_mask,
}
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
if "fastest_step" in incremental_state:
incremental_state["fastest_step"] = incremental_state[
"fastest_step"
].index_select(0, new_order)
@register_model_architecture("transformer_monotonic", "transformer_monotonic")
def base_monotonic_architecture(args):
base_architecture(args)
args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_iwslt_de_en"
)
def transformer_monotonic_iwslt_de_en(args):
transformer_iwslt_de_en(args)
base_monotonic_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_de_big"
)
def transformer_monotonic_vaswani_wmt_en_de_big(args):
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_fr_big"
)
def transformer_monotonic_vaswani_wmt_en_fr_big(args):
    transformer_vaswani_wmt_en_fr_big(args)
@register_model_architecture(
"transformer_unidirectional", "transformer_unidirectional_iwslt_de_en"
)
def transformer_unidirectional_iwslt_de_en(args):
transformer_iwslt_de_en(args)
|
COCO-LM/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py",
"repo_id": "COCO-LM",
"token_count": 4284
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains a collection of classes which implement
collate functionalities for various tasks.
Collaters should know what data to expect for each sample
and should pack / collate them into batches.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
    Implements a collate function mainly for seq2seq tasks.
    This expects each sample to contain a feature (src_tokens) and
    a target.
    This collater is also used for the aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
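            # prev_output_tokens is the target shifted right (EOS moved
            # to the front) and is what the decoder consumes for
            # teacher forcing.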
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
|
COCO-LM/fairseq/examples/speech_recognition/data/collaters.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/data/collaters.py",
"repo_id": "COCO-LM",
"token_count": 2220
}
| 189 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
"""
Utility modules for computation of Word Error Rate,
Alignments, as well as more granular metrics like
deletions, insertions and substitutions.
"""
class Code(Enum):
match = 1
substitution = 2
insertion = 3
deletion = 4
class Token(object):
def __init__(self, lbl="", st=np.nan, en=np.nan):
if np.isnan(st):
self.label, self.start, self.end = "", 0.0, 0.0
else:
self.label, self.start, self.end = lbl, st, en
class AlignmentResult(object):
def __init__(self, refs, hyps, codes, score):
self.refs = refs # std::deque<int>
self.hyps = hyps # std::deque<int>
self.codes = codes # std::deque<Code>
self.score = score # float
def coordinate_to_offset(row, col, ncols):
return int(row * ncols + col)
def offset_to_row(offset, ncols):
return int(offset / ncols)
def offset_to_col(offset, ncols):
return int(offset % ncols)
def trimWhitespace(str):
return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
def str2toks(str):
pieces = trimWhitespace(str).split(" ")
toks = []
for p in pieces:
toks.append(Token(p, 0.0, 0.0))
return toks
class EditDistance(object):
def __init__(self, time_mediated):
self.time_mediated_ = time_mediated
self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>
self.backtraces_ = (
np.nan
) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
self.confusion_pairs_ = {}
def cost(self, ref, hyp, code):
if self.time_mediated_:
if code == Code.match:
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
elif code == Code.insertion:
return hyp.end - hyp.start
elif code == Code.deletion:
return ref.end - ref.start
else: # substitution
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
else:
if code == Code.match:
return 0
elif code == Code.insertion or code == Code.deletion:
return 3
else: # substitution
return 4
def get_result(self, refs, hyps):
res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
num_rows, num_cols = self.scores_.shape
res.score = self.scores_[num_rows - 1, num_cols - 1]
curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
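        # Walk the backtrace matrix from the bottom-right cell back to
        # the origin, decoding each linear offset into (row, col)
        # coordinates and emitting one alignment code per step.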
while curr_offset != 0:
curr_row = offset_to_row(curr_offset, num_cols)
curr_col = offset_to_col(curr_offset, num_cols)
prev_offset = self.backtraces_[curr_row, curr_col]
prev_row = offset_to_row(prev_offset, num_cols)
prev_col = offset_to_col(prev_offset, num_cols)
res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
res.hyps.appendleft(curr_col - 1)
if curr_row - 1 == prev_row and curr_col == prev_col:
res.codes.appendleft(Code.deletion)
elif curr_row == prev_row and curr_col - 1 == prev_col:
res.codes.appendleft(Code.insertion)
else:
# assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
ref_str = refs[res.refs[0]].label
hyp_str = hyps[res.hyps[0]].label
if ref_str == hyp_str:
res.codes.appendleft(Code.match)
else:
res.codes.appendleft(Code.substitution)
confusion_pair = "%s -> %s" % (ref_str, hyp_str)
if confusion_pair not in self.confusion_pairs_:
self.confusion_pairs_[confusion_pair] = 1
else:
self.confusion_pairs_[confusion_pair] += 1
curr_offset = prev_offset
return res
def align(self, refs, hyps):
if len(refs) == 0 and len(hyps) == 0:
return np.nan
# NOTE: we're not resetting the values in these matrices because every value
# will be overridden in the loop below. If this assumption doesn't hold,
# be sure to set all entries in self.scores_ and self.backtraces_ to 0.
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
num_rows, num_cols = self.scores_.shape
for i in range(num_rows):
for j in range(num_cols):
if i == 0 and j == 0:
self.scores_[i, j] = 0.0
self.backtraces_[i, j] = 0
continue
if i == 0:
self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
None, hyps[j - 1], Code.insertion
)
self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
continue
if j == 0:
self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
refs[i - 1], None, Code.deletion
)
self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
continue
# Below here both i and j are greater than 0
ref = refs[i - 1]
hyp = hyps[j - 1]
best_score = self.scores_[i - 1, j - 1] + (
self.cost(ref, hyp, Code.match)
if (ref.label == hyp.label)
else self.cost(ref, hyp, Code.substitution)
)
prev_row = i - 1
prev_col = j - 1
ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
if ins < best_score:
best_score = ins
prev_row = i
prev_col = j - 1
delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
if delt < best_score:
best_score = delt
prev_row = i - 1
prev_col = j
self.scores_[i, j] = best_score
self.backtraces_[i, j] = coordinate_to_offset(
prev_row, prev_col, num_cols
)
return self.get_result(refs, hyps)
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
self.ed_ = EditDistance(False)
self.id2oracle_errs_ = {}
self.utts_ = 0
self.words_ = 0
self.insertions_ = 0
self.deletions_ = 0
self.substitutions_ = 0
self.process(["dummy_str", hyp_str, ref_str])
if verbose:
print("'%s' vs '%s'" % (hyp_str, ref_str))
self.report_result()
def process(self, input): # std::vector<std::string>&& input
if len(input) < 3:
print(
"Input must be of the form <id> ... <hypo> <ref> , got ",
len(input),
" inputs:",
)
return None
# Align
# std::vector<Token> hyps;
# std::vector<Token> refs;
hyps = str2toks(input[-2])
refs = str2toks(input[-1])
alignment = self.ed_.align(refs, hyps)
if alignment is None:
print("Alignment is null")
return np.nan
# Tally errors
ins = 0
dels = 0
subs = 0
for code in alignment.codes:
if code == Code.substitution:
subs += 1
elif code == Code.insertion:
ins += 1
elif code == Code.deletion:
dels += 1
# Output
row = input
row.append(str(len(refs)))
row.append(str(ins))
row.append(str(dels))
row.append(str(subs))
# print(row)
# Accumulate
kIdIndex = 0
kNBestSep = "/"
pieces = input[kIdIndex].split(kNBestSep)
if len(pieces) == 0:
print(
"Error splitting ",
input[kIdIndex],
" on '",
kNBestSep,
"', got empty list",
)
return np.nan
id = pieces[0]
if id not in self.id2oracle_errs_:
self.utts_ += 1
self.words_ += len(refs)
self.insertions_ += ins
self.deletions_ += dels
self.substitutions_ += subs
self.id2oracle_errs_[id] = [ins, dels, subs]
else:
curr_err = ins + dels + subs
prev_err = np.sum(self.id2oracle_errs_[id])
if curr_err < prev_err:
self.id2oracle_errs_[id] = [ins, dels, subs]
return 0
def report_result(self):
# print("---------- Summary ---------------")
if self.words_ == 0:
print("No words counted")
return
# 1-best
best_wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
print(
"\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
"%0.2f%% dels, %0.2f%% subs)"
% (
best_wer,
self.utts_,
self.words_,
100.0 * self.insertions_ / self.words_,
100.0 * self.deletions_ / self.words_,
100.0 * self.substitutions_ / self.words_,
)
)
def wer(self):
if self.words_ == 0:
wer = np.nan
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
return wer
def stats(self):
if self.words_ == 0:
stats = {}
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
stats = dict(
{
"wer": wer,
"utts": self.utts_,
"numwords": self.words_,
"ins": self.insertions_,
"dels": self.deletions_,
"subs": self.substitutions_,
"confusion_pairs": self.ed_.confusion_pairs_,
}
)
return stats
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer()
def calc_wer_stats(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.stats()
def get_wer_alignment_codes(hyp_str, ref_str):
"""
INPUT: hypothesis string, reference string
OUTPUT: List of alignment codes (intermediate results from WER computation)
"""
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
def merge_counts(x, y):
# Merge two hashes which have 'counts' as their values
# This can be used for example to merge confusion pair counts
# conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
for k, v in y.items():
if k not in x:
x[k] = 0
x[k] += v
return x
|
COCO-LM/fairseq/examples/speech_recognition/utils/wer_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/utils/wer_utils.py",
"repo_id": "COCO-LM",
"token_count": 6380
}
| 190 |
# Neural Machine Translation
This README contains instructions for [using pretrained translation models](#example-usage-torchhub)
as well as [training new models](#training-a-new-model).
## Pre-trained models
Model | Description | Dataset | Download
---|---|---|---
`conv.wmt14.en-fr` | Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2) <br> newstest2012/2013: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.ntst1213.tar.bz2)
`conv.wmt14.en-de` | Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-de.newstest2014.tar.bz2)
`conv.wmt17.en-de` | Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT17 English-German](http://statmt.org/wmt17/translation-task.html#Download) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.v2.en-de.newstest2014.tar.bz2)
`transformer.wmt14.en-fr` | Transformer <br> ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
`transformer.wmt16.en-de` | Transformer <br> ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
`transformer.wmt18.en-de` | Transformer <br> ([Edunov et al., 2018](https://arxiv.org/abs/1808.09381)) <br> WMT'18 winner | [WMT'18 English-German](http://www.statmt.org/wmt18/translation-task.html) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz) <br> See NOTE in the archive
`transformer.wmt19.en-de` | Transformer <br> ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) <br> WMT'19 winner | [WMT'19 English-German](http://www.statmt.org/wmt19/translation-task.html) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz)
`transformer.wmt19.de-en` | Transformer <br> ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) <br> WMT'19 winner | [WMT'19 German-English](http://www.statmt.org/wmt19/translation-task.html) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz)
`transformer.wmt19.en-ru` | Transformer <br> ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) <br> WMT'19 winner | [WMT'19 English-Russian](http://www.statmt.org/wmt19/translation-task.html) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz)
`transformer.wmt19.ru-en` | Transformer <br> ([Ng et al., 2019](https://arxiv.org/abs/1907.06616)) <br> WMT'19 winner | [WMT'19 Russian-English](http://www.statmt.org/wmt19/translation-task.html) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz)
## Example usage (torch.hub)
We require a few additional Python dependencies for preprocessing:
```bash
pip install fastBPE sacremoses subword_nmt
```
Interactive translation via PyTorch Hub:
```python
import torch
import fairseq
# List available models
torch.hub.list('pytorch/fairseq') # [..., 'transformer.wmt16.en-de', ... ]
# Load a transformer trained on WMT'16 En-De
# Note: WMT'19 models use fastBPE instead of subword_nmt, see instructions below
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
tokenizer='moses', bpe='subword_nmt')
en2de.eval() # disable dropout
# The underlying model is available under the *models* attribute
assert isinstance(en2de.models[0], fairseq.models.transformer.TransformerModel)
# Move model to GPU for faster translation
en2de.cuda()
# Translate a sentence
en2de.translate('Hello world!')
# 'Hallo Welt!'
# Batched translation
en2de.translate(['Hello world!', 'The cat sat on the mat.'])
# ['Hallo Welt!', 'Die Katze saß auf der Matte.']
```
Loading custom models:
```python
from fairseq.models.transformer import TransformerModel
zh2en = TransformerModel.from_pretrained(
'/path/to/checkpoints',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='data-bin/wmt17_zh_en_full',
bpe='subword_nmt',
bpe_codes='data-bin/wmt17_zh_en_full/zh.code'
)
zh2en.translate('你好 世界')
# 'Hello World'
```
If you are using one of the `transformer.wmt19` models, you will need to set the `bpe`
argument to `'fastbpe'` and (optionally) load the 4-model ensemble:
```python
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de',
checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt',
tokenizer='moses', bpe='fastbpe')
en2de.eval() # disable dropout
```
## Example usage (CLI tools)
Generation with the binarized test sets can be run in batch mode as follows, e.g. for WMT 2014 English-French on a GTX-1080ti:
```bash
mkdir -p data-bin
curl https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2 | tar xvjf - -C data-bin
curl https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2 | tar xvjf - -C data-bin
fairseq-generate data-bin/wmt14.en-fr.newstest2014 \
--path data-bin/wmt14.en-fr.fconv-py/model.pt \
--beam 5 --batch-size 128 --remove-bpe | tee /tmp/gen.out
# ...
# | Translated 3003 sentences (96311 tokens) in 166.0s (580.04 tokens/s)
# | Generate test with beam=5: BLEU4 = 40.83, 67.5/46.9/34.4/25.5 (BP=1.000, ratio=1.006, syslen=83262, reflen=82787)
# Compute BLEU score
grep ^H /tmp/gen.out | cut -f3- > /tmp/gen.out.sys
grep ^T /tmp/gen.out | cut -f2- > /tmp/gen.out.ref
fairseq-score --sys /tmp/gen.out.sys --ref /tmp/gen.out.ref
# BLEU4 = 40.83, 67.5/46.9/34.4/25.5 (BP=1.000, ratio=1.006, syslen=83262, reflen=82787)
```
## Training a new model
### IWSLT'14 German to English (Transformer)
The following instructions can be used to train a Transformer model on the [IWSLT'14 German to English dataset](http://workshop2014.iwslt.org/downloads/proceeding.pdf).
First download and preprocess the data:
```bash
# Download and prepare the data
cd examples/translation/
bash prepare-iwslt14.sh
cd ../..
# Preprocess/binarize the data
TEXT=examples/translation/iwslt14.tokenized.de-en
fairseq-preprocess --source-lang de --target-lang en \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/iwslt14.tokenized.de-en \
--workers 20
```
Next we'll train a Transformer translation model over this data:
```bash
CUDA_VISIBLE_DEVICES=0 fairseq-train \
data-bin/iwslt14.tokenized.de-en \
--arch transformer_iwslt_de_en --share-decoder-input-output-embed \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr 5e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
--dropout 0.3 --weight-decay 0.0001 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--max-tokens 4096 \
--eval-bleu \
--eval-bleu-args '{"beam": 5, "max_len_a": 1.2, "max_len_b": 10}' \
--eval-bleu-detok moses \
--eval-bleu-remove-bpe \
--eval-bleu-print-samples \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric
```
Finally we can evaluate our trained model:
```bash
fairseq-generate data-bin/iwslt14.tokenized.de-en \
--path checkpoints/checkpoint_best.pt \
--batch-size 128 --beam 5 --remove-bpe
```
### WMT'14 English to German (Convolutional)
The following instructions can be used to train a Convolutional translation model on the WMT English to German dataset.
See the [Scaling NMT README](../scaling_nmt/README.md) for instructions to train a Transformer translation model on this data.
The WMT English to German dataset can be preprocessed using the `prepare-wmt14en2de.sh` script.
By default it will produce a dataset that was modeled after [Attention Is All You Need (Vaswani et al., 2017)](https://arxiv.org/abs/1706.03762), but with additional news-commentary-v12 data from WMT'17.
To use only data available in WMT'14 or to replicate results obtained in the original [Convolutional Sequence to Sequence Learning (Gehring et al., 2017)](https://arxiv.org/abs/1705.03122) paper, please use the `--icml17` option.
```bash
# Download and prepare the data
cd examples/translation/
# WMT'17 data:
bash prepare-wmt14en2de.sh
# or to use WMT'14 data:
# bash prepare-wmt14en2de.sh --icml17
cd ../..
# Binarize the dataset
TEXT=examples/translation/wmt17_en_de
fairseq-preprocess \
--source-lang en --target-lang de \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/wmt17_en_de --thresholdtgt 0 --thresholdsrc 0 \
--workers 20
# Train the model
mkdir -p checkpoints/fconv_wmt_en_de
fairseq-train \
data-bin/wmt17_en_de \
--arch fconv_wmt_en_de \
--dropout 0.2 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--optimizer nag --clip-norm 0.1 \
--lr 0.5 --lr-scheduler fixed --force-anneal 50 \
--max-tokens 4000 \
--save-dir checkpoints/fconv_wmt_en_de
# Evaluate
fairseq-generate data-bin/wmt17_en_de \
--path checkpoints/fconv_wmt_en_de/checkpoint_best.pt \
--beam 5 --remove-bpe
```
### WMT'14 English to French
```bash
# Download and prepare the data
cd examples/translation/
bash prepare-wmt14en2fr.sh
cd ../..
# Binarize the dataset
TEXT=examples/translation/wmt14_en_fr
fairseq-preprocess \
--source-lang en --target-lang fr \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/wmt14_en_fr --thresholdtgt 0 --thresholdsrc 0 \
--workers 60
# Train the model
mkdir -p checkpoints/fconv_wmt_en_fr
fairseq-train \
data-bin/wmt14_en_fr \
--arch fconv_wmt_en_fr \
--dropout 0.1 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--optimizer nag --clip-norm 0.1 \
--lr 0.5 --lr-scheduler fixed --force-anneal 50 \
--max-tokens 3000 \
--save-dir checkpoints/fconv_wmt_en_fr
# Evaluate
fairseq-generate \
  data-bin/wmt14_en_fr \
--path checkpoints/fconv_wmt_en_fr/checkpoint_best.pt \
--beam 5 --remove-bpe
```
## Multilingual Translation
We also support training multilingual translation models. In this example we'll
train a multilingual `{de,fr}-en` translation model using the IWSLT'17 datasets.
Note that we use slightly different preprocessing here than for the IWSLT'14
De-En data above. In particular we learn a joint BPE code for all three
languages and use fairseq-interactive and sacrebleu for scoring the test set.
```bash
# First install sacrebleu and sentencepiece
pip install sacrebleu sentencepiece
# Then download and preprocess the data
cd examples/translation/
bash prepare-iwslt17-multilingual.sh
cd ../..
# Binarize the de-en dataset
TEXT=examples/translation/iwslt17.de_fr.en.bpe16k
fairseq-preprocess --source-lang de --target-lang en \
--trainpref $TEXT/train.bpe.de-en \
--validpref $TEXT/valid0.bpe.de-en,$TEXT/valid1.bpe.de-en,$TEXT/valid2.bpe.de-en,$TEXT/valid3.bpe.de-en,$TEXT/valid4.bpe.de-en,$TEXT/valid5.bpe.de-en \
--destdir data-bin/iwslt17.de_fr.en.bpe16k \
--workers 10
# Binarize the fr-en dataset
# NOTE: it's important to reuse the en dictionary from the previous step
fairseq-preprocess --source-lang fr --target-lang en \
--trainpref $TEXT/train.bpe.fr-en \
--validpref $TEXT/valid0.bpe.fr-en,$TEXT/valid1.bpe.fr-en,$TEXT/valid2.bpe.fr-en,$TEXT/valid3.bpe.fr-en,$TEXT/valid4.bpe.fr-en,$TEXT/valid5.bpe.fr-en \
--tgtdict data-bin/iwslt17.de_fr.en.bpe16k/dict.en.txt \
--destdir data-bin/iwslt17.de_fr.en.bpe16k \
--workers 10
# Train a multilingual transformer model
# NOTE: the command below assumes 1 GPU, but accumulates gradients from
# 8 fwd/bwd passes to simulate training on 8 GPUs
mkdir -p checkpoints/multilingual_transformer
CUDA_VISIBLE_DEVICES=0 fairseq-train data-bin/iwslt17.de_fr.en.bpe16k/ \
--max-epoch 50 \
--ddp-backend=legacy_ddp \
--task multilingual_translation --lang-pairs de-en,fr-en \
--arch multilingual_transformer_iwslt_de_en \
--share-decoders --share-decoder-input-output-embed \
--optimizer adam --adam-betas '(0.9, 0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--warmup-updates 4000 --warmup-init-lr '1e-07' \
--label-smoothing 0.1 --criterion label_smoothed_cross_entropy \
--dropout 0.3 --weight-decay 0.0001 \
--save-dir checkpoints/multilingual_transformer \
--max-tokens 4000 \
--update-freq 8
# Generate and score the test set with sacrebleu
SRC=de
sacrebleu --test-set iwslt17 --language-pair ${SRC}-en --echo src \
| python scripts/spm_encode.py --model examples/translation/iwslt17.de_fr.en.bpe16k/sentencepiece.bpe.model \
> iwslt17.test.${SRC}-en.${SRC}.bpe
cat iwslt17.test.${SRC}-en.${SRC}.bpe \
| fairseq-interactive data-bin/iwslt17.de_fr.en.bpe16k/ \
--task multilingual_translation --lang-pairs de-en,fr-en \
--source-lang ${SRC} --target-lang en \
--path checkpoints/multilingual_transformer/checkpoint_best.pt \
--buffer-size 2000 --batch-size 128 \
--beam 5 --remove-bpe=sentencepiece \
> iwslt17.test.${SRC}-en.en.sys
grep ^H iwslt17.test.${SRC}-en.en.sys | cut -f3 \
| sacrebleu --test-set iwslt17 --language-pair ${SRC}-en
```
##### Argument format during inference
During inference you must specify a single `--source-lang` and
`--target-lang`, which indicate the inference language direction.
`--lang-pairs`, `--encoder-langtok`, and `--decoder-langtok` must be set to
the same values that were used during training.
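For example, reusing the multilingual checkpoint trained above, decoding the
fr-en direction only requires changing the direction flags (all other
arguments match the training command):
```bash
fairseq-interactive data-bin/iwslt17.de_fr.en.bpe16k/ \
    --task multilingual_translation --lang-pairs de-en,fr-en \
    --source-lang fr --target-lang en \
    --path checkpoints/multilingual_transformer/checkpoint_best.pt \
    --beam 5 --remove-bpe=sentencepiece
```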
|
COCO-LM/fairseq/examples/translation/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/translation/README.md",
"repo_id": "COCO-LM",
"token_count": 5913
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import numpy as np
aggregate_funcs = {
"std": np.std,
"var": np.var,
"median": np.median,
"mean": np.mean,
"min": np.min,
"max": np.max,
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", required=True, type=str)
parser.add_argument("-n", "--repeat_times", required=True, type=int)
parser.add_argument("-o", "--output_file", required=False)
parser.add_argument("-f", "--func", required=False, default="mean")
args = parser.parse_args()
    stream = open(args.output_file, "w") if args.output_file else sys.stdout
    # Read one score per line; every `repeat_times` consecutive scores are
    # collapsed into a single value with the chosen aggregation function.
    segment_scores = []
    with open(args.input_file) as f:
        for line in f:
            segment_scores.append(float(line.strip()))
            if len(segment_scores) == args.repeat_times:
                stream.write("{}\n".format(aggregate_funcs[args.func](segment_scores)))
                segment_scores = []
    if stream is not sys.stdout:
        stream.close()
if __name__ == "__main__":
main()
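# Usage sketch (file names are illustrative): average every 3 consecutive
# scores from an input file, writing one aggregated value per segment:
#
#   python aggregate_scores.py -i segment_scores.txt -n 3 -f mean -o means.txt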
|
COCO-LM/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py",
"repo_id": "COCO-LM",
"token_count": 443
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import collections
import contextlib
import logging
import os
import re
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import Container, DictConfig, open_dict, OmegaConf
logger = logging.getLogger(__name__)
def ckp_copy_fun(src, checkpoints, end_of_epoch, cfg):
    for cp in checkpoints:
        try:
            if src != cp:
                logger.info("copy {} to {}".format(src, cp))
                PathManager.copy(src, cp, overwrite=True)
        except Exception:
            logger.info("copy failed, please copy it manually")
try:
def remove_ckps(root_path):
if not end_of_epoch and cfg.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
root_path, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[cfg.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
logger.info("removed {}".format(old_chk))
if cfg.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(root_path, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[cfg.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
logger.info("removed {}".format(old_chk))
if cfg.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
root_path,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(
cfg.best_checkpoint_metric
),
)
if not cfg.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
logger.info("removed {}".format(old_chk))
remove_ckps(cfg.tmp_save_dir)
remove_ckps(cfg.save_dir)
    except Exception:
        logger.info("failed to remove old checkpoints")
logger.info("finished async ckp saving.")
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss, ckp_copy_thread):
from fairseq import meters
# only one worker should attempt to create the required dir
if trainer.data_parallel_rank == 0:
os.makedirs(cfg.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if cfg.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if cfg.no_save:
return
trainer.consolidate_optimizer()
if not trainer.should_save_checkpoint_on_current_rank:
return
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")
def is_better(a, b):
return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
suffix = trainer.checkpoint_suffix
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and cfg.keep_best_checkpoints > 0:
checkpoint_conds[
"checkpoint.best_{}_{:.2f}.pt".format(cfg.best_checkpoint_metric, val_loss)
] = not hasattr(save_checkpoint, "best") or is_better(
val_loss, save_checkpoint.best
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not cfg.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
tmp_checkpoints = [
os.path.join(cfg.tmp_save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(tmp_checkpoints[0], extra_state)
if ckp_copy_thread is not None:
assert not cfg.write_checkpoints_asynchronously
ckp_copy_thread.apply_async(ckp_copy_fun, (tmp_checkpoints[0], checkpoints, end_of_epoch, cfg))
# for cp in checkpoints[1:]:
# if cfg.write_checkpoints_asynchronously:
# # TODO[ioPath]: Need to implement a delayed asynchronous
# # file copying/moving feature.
# logger.warning(
# f"ioPath is not copying {checkpoints[0]} to {cp} "
# "since async write mode is on."
# )
# else:
# assert PathManager.copy(
# checkpoints[0], cp, overwrite=True
# ), f"Failed to copy {checkpoints[0]} to {cp}"
write_timer.stop()
logger.info(
"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
tmp_checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
# if not end_of_epoch and cfg.keep_interval_updates > 0:
# # remove old checkpoints; checkpoints are sorted in descending order
# checkpoints = checkpoint_paths(
# cfg.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
# )
# for old_chk in checkpoints[cfg.keep_interval_updates :]:
# if os.path.lexists(old_chk):
# os.remove(old_chk)
# if cfg.keep_last_epochs > 0:
# # remove old epoch checkpoints; checkpoints are sorted in descending order
# checkpoints = checkpoint_paths(cfg.save_dir, pattern=r"checkpoint(\d+)\.pt")
# for old_chk in checkpoints[cfg.keep_last_epochs :]:
# if os.path.lexists(old_chk):
# os.remove(old_chk)
# if cfg.keep_best_checkpoints > 0:
# # only keep the best N checkpoints according to validation metric
# checkpoints = checkpoint_paths(
# cfg.save_dir,
# pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(
# cfg.best_checkpoint_metric
# ),
# )
# if not cfg.maximize_best_checkpoint_metric:
# checkpoints = checkpoints[::-1]
# for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
# if os.path.lexists(old_chk):
# os.remove(old_chk)
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if cfg.finetune_from_model is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
        raise ValueError(
            "--finetune-from-model cannot be set together with --reset-optimizer,"
            " --reset-lr-scheduler, --reset-meters or --reset-dataloader"
        )
suffix = trainer.checkpoint_suffix
if (
cfg.restore_file == "checkpoint_last.pt"
): # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path)
if cfg.finetune_from_model is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
if PathManager.exists(cfg.finetune_from_model):
checkpoint_path = cfg.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(
f"loading pretrained model from {checkpoint_path}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
else:
                raise ValueError(
                    f"--finetune-from-model {cfg.finetune_from_model} does not exist"
                )
elif suffix is not None:
checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = cfg.restore_file
if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(cfg)
)
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
if "cfg" in state and state["cfg"] is not None:
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
state["cfg"] = OmegaConf.create(state["cfg"])
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(state["cfg"], True)
if arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
# state = _upgrade_state_dict(state)
return state
def load_model_ensemble(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble, args, _task = load_model_ensemble_and_task(
filenames,
arg_overrides,
task,
strict,
suffix,
num_shards,
state,
)
return ensemble, args
def load_model_ensemble_and_task(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
assert state is None or len(filenames) == 1
from fairseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
assert num_shards > 0
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
if task is None:
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
# build model for ensemble
model = task.build_model(cfg.model)
model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model)
# reset state so it gets loaded for the next model in ensemble
state = None
ensemble.append(model)
return ensemble, cfg, task
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
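# Example: with checkpoint1.pt, checkpoint2.pt and checkpoint10.pt present in
# `save_dir`, checkpoint_paths(save_dir) returns the paths sorted by the
# captured epoch number in descending order: 10, 2, 1.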
def torch_persistent_save(obj, filename, async_write: bool = False):
if async_write:
with PathManager.opena(filename, "wb") as f:
_torch_persistent_save(obj, f)
else:
if PathManager.supports_rename(filename):
# do atomic save
with PathManager.open(filename + ".tmp", "wb") as f:
_torch_persistent_save(obj, f)
PathManager.rename(filename + ".tmp", filename)
else:
# fallback to non-atomic save
with PathManager.open(filename, "wb") as f:
_torch_persistent_save(obj, f)
def _torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if i == 2:
logger.error(traceback.format_exc())
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
from fairseq import models, registry, tasks
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# old model checkpoints may not have separate source/target positions
if "args" in state and hasattr(state["args"], "max_positions") and not hasattr(
state["args"], "max_source_positions"
):
state["args"].max_source_positions = state["args"].max_positions
state["args"].max_target_positions = state["args"].max_positions
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# backward compatibility, cfg updates
if "args" in state and state["args"] is not None:
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# --raw-text and --lazy-load are deprecated
if getattr(state["args"], "raw_text", False):
state["args"].dataset_impl = "raw"
elif getattr(state["args"], "lazy_load", False):
state["args"].dataset_impl = "lazy"
# epochs start at 1
if state["extra_state"]["train_iterator"] is not None:
state["extra_state"]["train_iterator"]["epoch"] = max(
state["extra_state"]["train_iterator"].get("epoch", 1), 1
)
# --remove-bpe ==> --postprocess
if hasattr(state["args"], "remove_bpe"):
state["args"].post_process = state["args"].remove_bpe
# --min-lr ==> --stop-min-lr
if hasattr(state["args"], "min_lr"):
state["args"].stop_min_lr = state["args"].min_lr
del state["args"].min_lr
# binary_cross_entropy => wav2vec criterion
if (
hasattr(state["args"], "criterion")
and state["args"].criterion == "binary_cross_entropy"
):
state["args"].criterion = "wav2vec"
# speech_pretraining => audio pretraining
if (
hasattr(state["args"], "task")
and state["args"].task == "speech_pretraining"
):
state["args"].task = "audio_pretraining"
# audio_cpc => wav2vec
if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc":
state["args"].arch = "wav2vec"
# convert legacy float learning rate to List[float]
if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float):
state["args"].lr = [state["args"].lr]
# convert task data arg to a string instead of List[string]
if (
hasattr(state["args"], "data")
and isinstance(state["args"].data, list)
and len(state["args"].data) > 0
):
state["args"].data = state["args"].data[0]
state["cfg"] = convert_namespace_to_omegaconf(state["args"])
if "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
with open_dict(cfg):
# any upgrades for Hydra-based configs
if (
"task" in cfg
and "eval_wer_config" in cfg.task
and isinstance(cfg.task.eval_wer_config.print_alignment, bool)
):
cfg.task.eval_wer_config.print_alignment = "hard"
if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool):
cfg.generation.print_alignment = "hard"
if (
"model" in cfg
and "w2v_args" in cfg.model
and cfg.model.w2v_args is not None
and (
hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args
)
and isinstance(
cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool
)
):
cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard"
return state
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
arch = None
if model_cfg is not None:
arch = (
model_cfg._name
if isinstance(model_cfg, DictConfig)
else getattr(model_cfg, "arch", None)
)
if not model_cfg or arch is None or arch == "ptt_transformer":
# args should not be none, but don't crash if it is.
return state_dict
encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
int(layer_string) for layer_string in layers_to_keep.split(",")
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of an "it makes things work" fix than a proper solution.
if isinstance(model_cfg, DictConfig):
context = open_dict(model_cfg)
else:
context = contextlib.ExitStack()
with context:
if hasattr(model_cfg, "encoder_layers_to_keep"):
model_cfg.encoder_layers_to_keep = None
if hasattr(model_cfg, "decoder_layers_to_keep"):
model_cfg.decoder_layers_to_keep = None
return new_state_dict
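# Sketch of the remapping above (assumed config values): with
# model_cfg.encoder_layers_to_keep = "0,2", the mapping dict is {"0": "0", "2": "1"},
# so "encoder.layers.2.self_attn.k_proj.weight" is renamed to
# "encoder.layers.1.self_attn.k_proj.weight", and encoder layer 1 is dropped.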
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
        raise ValueError(
            "component to load must be either a FairseqEncoder or "
            "FairseqDecoder. Loading other component types is not supported."
        )
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
os.remove(temp_file_path)
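# Typical usage sketch (checkpoint paths are illustrative):
#
#   state = load_checkpoint_to_cpu("checkpoints/checkpoint_best.pt")
#   models, cfg = load_model_ensemble(["checkpoints/checkpoint_best.pt"])
#   models, cfg, task = load_model_ensemble_and_task(
#       ["checkpoints/checkpoint_best.pt"]
#   )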
|
COCO-LM/fairseq/fairseq/checkpoint_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/checkpoint_utils.py",
"repo_id": "COCO-LM",
"token_count": 13028
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
"""
Function to compute the cross entropy loss. The default value of
ignore_index is the same as the default value for F.cross_entropy in
pytorch.
"""
assert logits.size(0) == targets.size(
-1
), "Logits and Targets tensor shapes don't match up"
loss = F.nll_loss(
F.log_softmax(logits, -1, dtype=torch.float32),
targets,
reduction="sum",
ignore_index=ignore_index,
)
return loss
@register_criterion("legacy_masked_lm_loss")
class LegacyMaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
This optionally also computes the next sentence prediction (NSP) loss and
adds it to the overall loss based on the specified args. There are three
cases to consider:
1) Generic MLM training without NSP loss. In this case sentence_targets
and sentence_logits are both None.
2) BERT training without NSP loss. In this case sentence_targets is
not None but sentence_logits is None and we should not be computing
a sentence level loss.
3) BERT training with NSP loss. In this case both sentence_targets and
sentence_logits are not None and we should be computing a sentence
level loss. The weight of the sentence level loss is specified as
an argument.
"""
def __init__(self, task, masked_lm_only, nsp_loss_weight):
super().__init__(task)
self.masked_lm_only = masked_lm_only
self.nsp_loss_weight = nsp_loss_weight
@staticmethod
def add_args(parser):
"""Args for MaskedLM Loss"""
# Default for masked_lm_only is False so as to not break BERT training
parser.add_argument(
"--masked-lm-only",
default=False,
action="store_true",
help="compute MLM loss only",
)
parser.add_argument(
"--nsp-loss-weight",
default=1.0,
type=float,
help="weight for next sentence prediction" " loss (default 1)",
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
lm_logits, output_metadata = model(**sample["net_input"])
# reshape lm_logits from (N,T,C) to (N*T,C)
lm_logits = lm_logits.view(-1, lm_logits.size(-1))
lm_targets = sample["lm_target"].view(-1)
lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
# compute the number of tokens for which loss is computed. This is used
# to normalize the loss
ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
loss = lm_loss / ntokens
nsentences = sample["nsentences"]
# nsentences = 0
# Compute sentence loss if masked_lm_only is False
sentence_loss = None
if not self.masked_lm_only:
sentence_logits = output_metadata["sentence_logits"]
sentence_targets = sample["sentence_target"].view(-1)
# This needs to be recomputed due to some differences between
# TokenBlock and BlockPair dataset. This can be resolved with a
# refactor of BERTModel which we will do in the future.
# TODO: Remove this after refactor of BERTModel
nsentences = sentence_targets.size(0)
# Check for logits being none which can happen when remove_heads
# is set to true in the BERT model. Ideally we should set
# masked_lm_only to true in this case, but that requires some
# refactor in the BERT model.
if sentence_logits is not None:
sentence_loss = compute_cross_entropy_loss(
sentence_logits, sentence_targets
)
loss += self.nsp_loss_weight * (sentence_loss / nsentences)
# NOTE: as we are summing up per token mlm loss and per sentence nsp loss
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"lm_loss": utils.item(lm_loss.data) if reduce else lm_loss.data,
# sentence loss is not always computed
"sentence_loss": (
(utils.item(sentence_loss.data) if reduce else sentence_loss.data)
if sentence_loss is not None
else 0.0
),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
lm_loss_sum = sum(log.get("lm_loss", 0) for log in logging_outputs)
sentence_loss_sum = sum(log.get("sentence_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_loss = sum(log.get("loss", 0) for log in logging_outputs)
metrics.log_scalar(
"loss",
agg_loss / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
metrics.log_scalar(
"lm_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
metrics.log_scalar(
"sentence_loss",
sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0.0,
nsentences,
round=3,
)
metrics.log_scalar(
"nll_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
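# In summary, the loss computed by this criterion is (per batch):
#   loss = lm_loss / ntokens + nsp_loss_weight * (sentence_loss / nsentences)
# where the second term is only added when sentence-level logits are available.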
|
COCO-LM/fairseq/fairseq/criterions/legacy_masked_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/legacy_masked_lm.py",
"repo_id": "COCO-LM",
"token_count": 3057
}
| 194 |
import numpy as np
from fairseq.data.audio.feature_transforms import (
AudioFeatureTransform,
register_audio_feature_transform,
)
@register_audio_feature_transform("utterance_cmvn")
class UtteranceCMVN(AudioFeatureTransform):
"""Utterance-level CMVN (cepstral mean and variance normalization)"""
@classmethod
def from_config_dict(cls, config=None):
_config = {} if config is None else config
return UtteranceCMVN(
_config.get("norm_means", True),
_config.get("norm_vars", True),
)
def __init__(self, norm_means=True, norm_vars=True):
self.norm_means, self.norm_vars = norm_means, norm_vars
def __repr__(self):
return (
self.__class__.__name__
+ f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})"
)
    def __call__(self, x):
        mean = x.mean(axis=0)
        square_sums = (x ** 2).sum(axis=0)
        if self.norm_means:
            x = np.subtract(x, mean)
        if self.norm_vars:
            # var(x) = E[x^2] - (E[x])^2, floored at 1e-10 to avoid dividing
            # by (near-)zero standard deviations
            var = square_sums / x.shape[0] - mean ** 2
            std = np.sqrt(np.maximum(var, 1e-10))
            x = np.divide(x, std)
        return x
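# Minimal usage sketch (random features stand in for real filterbank input):
#
#   import numpy as np
#   cmvn = UtteranceCMVN(norm_means=True, norm_vars=True)
#   x = np.random.randn(100, 80).astype(np.float32)  # (num_frames, feat_dim)
#   y = cmvn(x)  # zero mean and unit variance along the time axis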
|
COCO-LM/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/audio/feature_transforms/utterance_cmvn.py",
"repo_id": "COCO-LM",
"token_count": 566
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
WHITESPACE_NORMALIZER = re.compile(r"\s+")
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
# excluding non-breaking space (160) here
PRINTABLE_LATIN = set(
list(range(32, 126 + 1)) + list(range(161, 172 + 1)) + list(range(174, 255 + 1))
)
BYTE_TO_BCHAR = {
b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256)
}
BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()}
def byte_encode(x: str) -> str:
normalized = WHITESPACE_NORMALIZER.sub(SPACE, x)
return "".join([BYTE_TO_BCHAR[b] for b in normalized.encode("utf-8")])
def byte_decode(x: str) -> str:
try:
return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode("utf-8")
except ValueError:
return ""
def smart_byte_decode(x: str) -> str:
    output = byte_decode(x)
    if output == "":
        # The sequence is not valid UTF-8 as a whole: use dynamic programming
        # to recover the decoding with the maximum number of valid characters.
        n_bytes = len(x)
        f = [0 for _ in range(n_bytes + 1)]   # f[i]: max chars decodable from x[:i]
        pt = [0 for _ in range(n_bytes + 1)]  # pt[i]: start of the chunk ending at i
        for i in range(1, n_bytes + 1):
            f[i], pt[i] = f[i - 1], i - 1
            for j in range(1, min(4, i) + 1):  # UTF-8 characters span 1-4 bytes
                if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j : i])) > 0:
                    f[i], pt[i] = f[i - j] + 1, i - j
        cur_pt = n_bytes
        while cur_pt > 0:
            if f[cur_pt] == f[pt[cur_pt]] + 1:
                output = byte_decode(x[pt[cur_pt] : cur_pt]) + output
            cur_pt = pt[cur_pt]
    return output
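# Round-trip sketch: smart_byte_decode salvages what it can from a corrupted
# byte-character sequence.
#
#   b = byte_encode("aéb")      # 4 byte-chars: 'a', the two bytes of 'é', 'b'
#   assert byte_decode(b) == "aéb"
#   corrupted = b[0] + b[2:]    # drop one byte of the multi-byte 'é'
#   assert smart_byte_decode(corrupted) == "ab"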
|
COCO-LM/fairseq/fairseq/data/encoders/byte_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/byte_utils.py",
"repo_id": "COCO-LM",
"token_count": 800
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class IdDataset(FairseqDataset):
def __getitem__(self, index):
return index
def __len__(self):
return 0
def collater(self, samples):
return torch.tensor(samples)
|
COCO-LM/fairseq/fairseq/data/id_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/id_dataset.py",
"repo_id": "COCO-LM",
"token_count": 146
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import math
import os
from collections import OrderedDict, defaultdict
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
Dictionary,
LanguagePairDataset,
PrependTokenDataset,
SampledMultiDataset,
SampledMultiEpochDataset,
StripTokenDataset,
TransformEosLangPairDataset,
TruncateDataset,
data_utils,
indexed_dataset,
)
from fairseq.data.multilingual.multilingual_utils import (
EncoderLangtok,
LangTokSpec,
LangTokStyle,
augment_dictionary,
get_lang_tok,
)
from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat
from fairseq.file_io import PathManager
from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict
logger = logging.getLogger(__name__)
SRC_DICT_NAME = 'src'
TGT_DICT_NAME = 'tgt'
def _lang_id(dic: Dictionary, lang: str):
"""Return language ID index."""
idx = dic.index(lang)
assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang)
return idx
def load_sampling_weights(from_file):
with open(from_file) as f:
weights = json.load(f)
return weights
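# Expected file format for --sampling-weights-from-file, keyed by
# "data_category:src-tgt" with relative sampling weights as values, e.g.:
#   {"main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5}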
class MultilingualDatasetManager(object):
def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
super().__init__()
self.args = args
self.seed = args.seed
self.lang_pairs = lang_pairs
self.extra_lang_pairs = (
list(
{p for _, v in args.extra_lang_pairs.items() for p in v.split(",")}
)
if args.extra_lang_pairs
else []
)
self.src_langs = {p.split("-")[0] for p in args.lang_pairs + self.extra_lang_pairs}
self.tgt_langs = {p.split("-")[1] for p in args.lang_pairs + self.extra_lang_pairs}
self.langs = langs
self.dicts = dicts
self.lang_dict = self.create_lang_dictionary(self.langs)
self.sampling_method = sampling_method
self.sampling_scheduler = None
self._has_sharded_data = False
self._num_shards_dict = {}
self._training_data_sizes = defaultdict(lambda: {})
@classmethod
def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
return MultilingualDatasetManager(
args, lang_pairs, langs, dicts, sampling_method
)
@staticmethod
def add_args(parser):
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
action=FileContentsAction,
)
parser.add_argument(
"--langs",
default=None,
type=csv_str_list,
help="a list of languages comma sperated languages which can appear in lang-pairs; "
"note that the ordering determines language token IDs",
)
parser.add_argument(
"--lang-dict",
default=None,
type=str,
help="an external file which contains a list of "
"languages which can appear in lang-pairs; "
"note that the ordering determines language token IDs; "
"--langs and --lang-dict are two exclusive options",
)
parser.add_argument('--source-dict', default=None, type=str,
help='path to source dictionary; if specified it will override per language dictionary loading')
parser.add_argument('--target-dict', default=None, type=str,
help='path to target dictionary; if specified it will override per language dictionary loading')
parser.add_argument(
"--lang-tok-style",
default=LangTokStyle.multilingual.value,
type=str,
choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value],
help="language token styles",
)
parser.add_argument(
"--load-alignments",
action="store_true",
help="load the binarized alignments",
)
parser.add_argument(
"--left-pad-source",
default="True",
type=str,
metavar="BOOL",
help="pad the source on the left",
)
parser.add_argument(
"--left-pad-target",
default="False",
type=str,
metavar="BOOL",
help="pad the target on the left",
)
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
"--upsample-primary",
default=1,
type=int,
help="amount to upsample primary dataset",
)
parser.add_argument(
"--truncate-source",
action="store_true",
default=False,
help="truncate source to max-source-positions",
)
parser.add_argument(
"--encoder-langtok",
default=None,
type=str,
choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value],
metavar="SRCTGT",
help="prepend to the beginning of source sentence the source or target "
"language token. (src/tgt)",
)
parser.add_argument(
"--decoder-langtok",
action="store_true",
help="prepend to the beginning of target sentence the target language token",
)
parser.add_argument(
"--lang-tok-replacing-bos-eos", action="store_true", default=False
)
parser.add_argument(
"--enable-lang-ids",
default=False,
action="store_true",
help="whether to include language IDs in samples",
)
parser.add_argument(
"--enable-reservsed-directions-shared-datasets",
default=False,
action="store_true",
help="whether to allow datasets be used in reversed directions",
)
parser.add_argument(
"--extra-data",
help='a dictionary of data name to this path, \
e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None,
)
parser.add_argument(
"--extra-lang-pairs",
help='a dictionary of data name to the language pairs they serve, \
e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None,
)
parser.add_argument(
"--fixed-dictionary",
help="Fixed dictionary to use with model path",
default=None,
type=str,
)
parser.add_argument(
"--langtoks-specs",
            help='a list of comma-separated data types for which specialized language tokens are added, \
                e.g. "main,dae,mined". A set of language tokens is added to the vocab to \
                distinguish languages in different training data types. If not \
                specified, default language tokens per language are added',
default=LangTokSpec.main.value,
type=csv_str_list,
)
parser.add_argument(
"--langtoks",
help='a dictionary of how to add language tokens, \
e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \
("src", "tgt")}, or {"mined": ("src.mined", "tgt")}',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument(
"--sampling-weights-from-file",
help='a file contain a python dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None,
type=str,
)
parser.add_argument(
"--sampling-weights",
help='a dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument(
"--virtual-epoch-size",
default=None,
type=int,
help="virtual epoch size to speed up data loading",
)
parser.add_argument(
"--virtual-data-size",
default=None,
type=int,
help="virtual data size of the whole joint dataset to speed"
"up data loading and have specific dynamic sampling strategy interval",
)
@classmethod
def load_langs(cls, args, **kwargs):
if args.lang_dict and args.langs:
raise ValueError("--langs and --lang-dict can not both be specified")
if args.lang_dict is None and args.langs is None:
logger.warning(
"External language dictionary is not provided; "
"use lang-pairs to infer the set of supported languages. "
"The language ordering is not stable which might cause "
"misalignment in pretraining and finetuning."
)
# infer from lang_pairs as it is
langs = list(
{x for lang_pair in args.lang_pairs for x in lang_pair.split("-")}
)
langs = sorted(langs)
logger.info(f"inferred language list: {langs}")
elif args.lang_dict:
with open(
PathManager.get_local_path(args.lang_dict), "r", encoding="utf-8"
) as f:
langs = [lang.strip() for lang in f.readlines() if lang.strip()]
logger.info(
f"loaded language list from {args.lang_dict} as they are ordered in file"
)
elif args.langs:
langs = args.langs
logger.info(
f"parsed the language list as they are ordered in the option: {langs}"
)
return langs
def has_sharded_data(self, split):
return self._has_sharded_data and split == getattr(
self.args, "train_subset", None
)
def _shared_collater(self):
return not (self.args.extra_data and "mono_dae" in self.args.extra_data) and (
not self.args.lang_tok_replacing_bos_eos
)
def estimate_global_pass_epoch(self, epoch):
if self.args.virtual_epoch_size is None or self.args.virtual_data_size is None:
return None
# one epoch more for remaining data in each shard
virtual_epochs_per_shard = math.ceil(
self.args.virtual_data_size / self.args.virtual_epoch_size
)
# note that fairseq epoch / shard_epoch starts from 1
shard_epoch = (epoch - 1) // virtual_epochs_per_shard + 1
return shard_epoch
@classmethod
def prepare(cls, load_dictionary, args, **kargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
if args.langtoks is None:
args.langtoks = {}
if "main" not in args.langtoks:
src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None
tgt_langtok_spec = "tgt" if args.decoder_langtok else None
args.langtoks["main"] = (src_langtok_spec, tgt_langtok_spec)
def check_langs(langs, pairs):
messages = []
for src, tgt in pairs:
if src not in langs or tgt not in langs:
messages.append(
f"language pair {src}-{tgt} contains languages "
"that are not in the language dictionary"
)
if len(messages) > 0:
raise ValueError(" ".join(messages) + f"; langs: {langs}")
if args.lang_pairs is None:
raise ValueError(
"--lang-pairs is required. List all the language pairs in the training objective."
)
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(",")
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
language_list = cls.load_langs(args, **kargs)
check_langs(
language_list,
(
[p.split("-") for p in args.lang_pairs]
if training
else [(args.source_lang, args.target_lang)]
),
)
def load_dictionary_and_postproc(path):
d = load_dictionary(path)
augment_dictionary(
dictionary=d,
language_list=language_list,
lang_tok_style=args.lang_tok_style,
langtoks_specs=args.langtoks_specs,
extra_data=args.extra_data,
)
return d
dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training)
return language_list, dicts, training
@classmethod
def load_all_dictionaries(cls, args, language_list, load_dictionary, training):
dicts = OrderedDict()
if args.source_dict is not None:
dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict)
if args.target_dict is not None:
dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict)
if training:
extra_lang_pairs = (
list(
{p for _, v in args.extra_lang_pairs.items() for p in v.split(",")}
)
if args.extra_lang_pairs
else []
)
src_langs_to_load_dicts = sorted(
{p.split("-")[0] for p in (args.lang_pairs + extra_lang_pairs)}
)
tgt_langs_to_load_dicts = sorted(
{p.split("-")[1] for p in (args.lang_pairs + extra_lang_pairs)}
)
else:
src_langs_to_load_dicts = [args.source_lang]
tgt_langs_to_load_dicts = [args.target_lang]
paths = utils.split_paths(args.data)
assert len(paths) > 0
def load_dicts(langs_to_load_dicts):
for lang in langs_to_load_dicts:
dicts[lang] = load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(lang))
)
if len(dicts) > 0:
dict0 = next(iter(dicts.values()))
assert dicts[lang].pad() == dict0.pad()
assert dicts[lang].eos() == dict0.eos()
assert dicts[lang].unk() == dict0.unk()
logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang])))
if args.fixed_dictionary is not None:
fixed_dict = load_dictionary(args.fixed_dictionary)
dicts = {lang: fixed_dict for lang in src_langs_to_load_dicts + tgt_langs_to_load_dicts}
else:
if args.source_dict is None:
load_dicts(src_langs_to_load_dicts)
if args.target_dict is None:
load_dicts(tgt_langs_to_load_dicts)
return dicts
def get_source_dictionary(self, lang):
if self.args.source_dict is not None:
return self.dicts[SRC_DICT_NAME]
else:
return self.dicts[lang]
def get_target_dictionary(self, lang):
if self.args.target_dict is not None:
return self.dicts[TGT_DICT_NAME]
else:
return self.dicts[lang]
@classmethod
def create_lang_dictionary(cls, langs):
unk = "<unk>"
# hack to remove symbols other than unk as they are not needed by lang dict
lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk)
for lang in langs:
lang_dict.add_symbol(lang)
return lang_dict
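    # Example: create_lang_dictionary(["de", "en", "fr"]) returns a dictionary
    # that only maps each language name to a stable integer ID; all special
    # symbols are collapsed into the single <unk> entry.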
@classmethod
def get_langtok_index(cls, lang_tok, dic):
idx = dic.index(lang_tok)
assert (
idx != dic.unk_index
), "cannot find language token {} in the dictionary".format(lang_tok)
return idx
def get_encoder_langtok(self, src_lang, tgt_lang, spec=None):
if spec is None:
return None
if spec and spec.startswith("src"):
if src_lang is None:
return None
langtok = get_lang_tok(
lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
else:
if tgt_lang is None:
return None
langtok = get_lang_tok(
lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
return self.get_langtok_index(
langtok, self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang)
)
def get_decoder_langtok(self, tgt_lang, spec=None):
if spec is None:
return None
langtok = get_lang_tok(
lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec
)
return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang))
@classmethod
def load_data(cls, path, vdict, impl):
dataset = data_utils.load_indexed_dataset(path, vdict, impl)
return dataset
@classmethod
def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
def load_lang_dataset(
self,
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
max_source_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
):
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
logger.error(
f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}"
)
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = self.load_data(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_datasets.append(self.load_data(prefix + tgt, tgt_dict, dataset_impl))
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(
data_path, "{}.align.{}-{}".format(split, src, tgt)
)
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
return src_dataset, tgt_dataset, align_dataset
def load_langpair_dataset(
self,
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
src_dataset_transform_func=lambda dataset: dataset,
tgt_dataset_transform_func=lambda dataset: dataset,
src_lang_id=None,
tgt_lang_id=None,
langpairs_sharing_datasets=None,
):
norm_direction = "-".join(sorted([src, tgt]))
if langpairs_sharing_datasets is not None:
src_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, src), "NotInCache"
)
tgt_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, tgt), "NotInCache"
)
align_dataset = langpairs_sharing_datasets.get(
(data_path, split, norm_direction, src, tgt), "NotInCache"
)
        # a hack: if any one is not in cache, we need to reload them
if (
langpairs_sharing_datasets is None
or src_dataset == "NotInCache"
or tgt_dataset == "NotInCache"
or align_dataset == "NotInCache"
or split != getattr(self.args, "train_subset", None)
):
# source and target datasets can be reused in reversed directions to save memory
# reversed directions of valid and test data will not share source and target datasets
src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
max_source_positions=max_source_positions,
prepend_bos=prepend_bos,
load_alignments=load_alignments,
truncate_source=truncate_source,
)
src_dataset = src_dataset_transform_func(src_dataset)
tgt_dataset = tgt_dataset_transform_func(tgt_dataset)
if langpairs_sharing_datasets is not None:
langpairs_sharing_datasets[
(data_path, split, norm_direction, src)
] = src_dataset
langpairs_sharing_datasets[
(data_path, split, norm_direction, tgt)
] = tgt_dataset
langpairs_sharing_datasets[
(data_path, split, norm_direction, src, tgt)
] = align_dataset
if align_dataset is None:
# no align data so flag the reverse direction as well in sharing
langpairs_sharing_datasets[
(data_path, split, norm_direction, tgt, src)
] = align_dataset
else:
logger.info(
f"Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: "
f"[{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}"
)
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset.sizes if tgt_dataset is not None else None,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
)
def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None):
if self.args.lang_tok_replacing_bos_eos:
# it is handled by self.alter_dataset_langtok
            # TODO: unify with alter_dataset_langtok
return dataset
if spec is None:
return dataset
tok = self.get_encoder_langtok(src_lang, tgt_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None):
if dataset is None:
# note that target dataset can be None during inference time
return None
if self.args.lang_tok_replacing_bos_eos:
            # TODO: unify with alter_dataset_langtok
# It is handled by self.alter_dataset_langtok.
# The complication in self.alter_dataset_langtok
# makes a unified framework difficult.
return dataset
# if not self.args.decoder_langtok:
if not spec:
return dataset
tok = self.get_decoder_langtok(target_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def alter_dataset_langtok(
self,
lang_pair_dataset,
src_eos=None,
src_lang=None,
tgt_eos=None,
tgt_lang=None,
src_langtok_spec=None,
tgt_langtok_spec=None,
):
if src_langtok_spec is None and tgt_langtok_spec is None:
return lang_pair_dataset
new_src_eos = None
if (
src_langtok_spec is not None
and src_eos is not None
and (src_lang is not None or tgt_lang is not None)
):
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec)
else:
src_eos = None
new_tgt_bos = None
if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
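    # Behavior sketch: with src_langtok_spec set, the source-side eos is
    # replaced by the encoder language token at collate time; with
    # tgt_langtok_spec set, the first decoder input token (the bos slot of
    # prev_output_tokens) is replaced by the decoder language token. The actual
    # swap happens inside TransformEosLangPairDataset.collater, not here.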
def load_a_dataset(
self,
split,
data_path,
src,
src_dict,
tgt,
tgt_dict,
combine,
prepend_bos=False,
langpairs_sharing_datasets=None,
data_category=None,
**extra_kwargs,
):
dataset_impl = self.args.dataset_impl
upsample_primary = self.args.upsample_primary
left_pad_source = self.args.left_pad_source
left_pad_target = self.args.left_pad_target
max_source_positions = self.args.max_source_positions
max_target_positions = self.args.max_target_positions
load_alignments = self.args.load_alignments
truncate_source = self.args.truncate_source
src_dataset_transform_func = self.src_dataset_tranform_func
tgt_dataset_transform_func = self.tgt_dataset_tranform_func
enable_lang_ids = self.args.enable_lang_ids
lang_dictionary = self.lang_dict
src_langtok_spec, tgt_langtok_spec = extra_kwargs["langtok_spec"]
src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec)
tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec)
logger.info(
f"{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}"
)
langpair_ds = self.load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos,
load_alignments,
truncate_source,
src_dataset_transform_func=lambda dataset: src_dataset_transform_func(
src, tgt, dataset, src_langtok_spec
),
tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func(
src, tgt, dataset, tgt_langtok_spec
),
src_lang_id=_lang_id(lang_dictionary, src)
if enable_lang_ids and lang_dictionary is not None
else None,
tgt_lang_id=_lang_id(lang_dictionary, tgt)
if enable_lang_ids and lang_dictionary is not None
else None,
langpairs_sharing_datasets=langpairs_sharing_datasets,
)
# TODO: handle modified lang toks for mined data and dae data
if self.args.lang_tok_replacing_bos_eos:
ds = self.alter_dataset_langtok(
langpair_ds,
                src_eos=(
                    self.get_source_dictionary(src).eos()
                    if src
                    else self.get_target_dictionary(tgt).eos()
                ),
src_lang=src,
tgt_eos=self.get_target_dictionary(tgt).eos(),
tgt_lang=tgt,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
ds = langpair_ds
return ds
def load_split_langpair_datasets(self, split, data_param_list):
datasets = []
langpairs_sharing_datasets = (
{} if self.args.enable_reservsed_directions_shared_datasets else None
)
for param in data_param_list:
ds = self.load_a_dataset(
split=split,
langpairs_sharing_datasets=langpairs_sharing_datasets,
**param,
)
datasets.append(ds)
return datasets
def get_data_paths_and_lang_pairs(self, split):
datapaths = {"main": self.args.data}
lang_pairs = {"main": self.lang_pairs}
if split == getattr(self.args, "train_subset", None):
# only training data can have extra data and extra language pairs
if self.args.extra_data:
extra_datapaths = self.args.extra_data
datapaths.update(extra_datapaths)
if self.args.extra_lang_pairs:
extra_lang_pairs = {
k: v.split(",") for k, v in self.args.extra_lang_pairs.items()
}
lang_pairs.update(extra_lang_pairs)
return datapaths, lang_pairs
@classmethod
def get_dataset_key(cls, data_category, src, tgt):
return f"{data_category}:{src}-{tgt}"
@classmethod
def _get_shard_num_dict(cls, split, paths):
shards = defaultdict(int)
for path in paths:
files = PathManager.ls(path)
directions = set()
for f in files:
if f.startswith(split) and f.endswith(".idx"):
# idx files of the form "{split}.{src}-{tgt}.{lang}.idx"
direction = f.split(".")[-3]
directions.add(direction)
for direction in directions:
shards[direction] += 1
return shards
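    # Example (assumed on-disk layout): if paths == ["data0", "data1"] and each
    # directory contains "train.de-en.de.idx", the direction "de-en" is counted
    # once per path, so the returned dict maps "de-en" -> 2 shards.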
def get_split_num_data_shards(self, split):
if split in self._num_shards_dict:
return self._num_shards_dict[split]
num_shards_dict = {}
data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split)
for data_category, paths in data_paths.items():
if data_category not in lang_pairs:
continue
paths = utils.split_paths(paths)
shards_dict = self._get_shard_num_dict(split, paths)
lang_dirs = [
lang_pair.split("-") for lang_pair in lang_pairs[data_category]
]
lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs]
for src, tgt in lang_dirs:
key = self.get_dataset_key(data_category, src, tgt)
if "mono_" in data_category:
# monolingual data requires tgt only
assert src is None or src == tgt, (
f"error: src={src}, "
"tgt={tgt} for data_category={data_category}"
)
num_shards_dict[key] = shards_dict[tgt]
else:
if f"{src}-{tgt}" in shards_dict:
num_shards_dict[key] = shards_dict[f"{src}-{tgt}"]
elif f"{tgt}-{src}" in shards_dict:
                        # follow the fairseq convention of falling back to reversed-direction data when the forward direction is unavailable
num_shards_dict[key] = shards_dict[f"{tgt}-{src}"]
self._num_shards_dict[split] = num_shards_dict
logger.info(f"[{split}] num of shards: {num_shards_dict}")
return num_shards_dict
@classmethod
def get_shard_id(cls, num_shards, epoch, shard_epoch=None):
shard = epoch if shard_epoch is None else shard_epoch
shard = (shard - 1) % num_shards
return shard
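    # Example: with num_shards=3 and shard_epoch=None, epochs 1..6 select
    # shards 0, 1, 2, 0, 1, 2 -- i.e. shard = (epoch - 1) % num_shards, so
    # every shard is revisited once every num_shards epochs.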
def get_split_data_path(self, paths, epoch, shard_epoch, num_shards):
path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)]
return path
def get_split_data_param_list(self, split, epoch, shard_epoch=None):
# TODO: to extend with extra datasets and keys and loop over different shard data paths
param_list = []
data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split)
logger.info(f"langtoks settings: {self.args.langtoks}")
split_num_shards_dict = self.get_split_num_data_shards(split)
for data_category, paths in data_paths.items():
if data_category not in lang_pairs:
continue
paths = utils.split_paths(paths)
assert len(paths) > 0
if len(paths) > 1:
self._has_sharded_data = True
if split != getattr(self.args, "train_subset", None):
                # if this is not the training dataset, use only the first shard for valid and test
paths = paths[:1]
if data_category in self.args.langtoks:
lang_tok_spec = self.args.langtoks[data_category]
else:
# default to None
lang_tok_spec = (None, None)
# infer langcode
lang_dirs = [
lang_pair.split("-") for lang_pair in lang_pairs[data_category]
]
lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs]
for src, tgt in lang_dirs:
assert src is not None or data_category == "mono_dae", (
f"error: src={src}, " "tgt={tgt} for data_category={data_category}"
)
# logger.info(f"preparing param for {data_category}: {src} - {tgt}")
key = self.get_dataset_key(data_category, src, tgt)
data_path = self.get_split_data_path(
paths, epoch, shard_epoch, split_num_shards_dict[key]
)
param_list.append(
{
"key": key,
"data_path": data_path,
"split": split,
"src": src,
"src_dict": self.get_source_dictionary(src)
if src and data_category != "mono_dae"
else None,
"tgt": tgt,
"tgt_dict": self.get_target_dictionary(tgt),
"data_category": data_category,
"langtok_spec": lang_tok_spec,
}
)
return param_list
def get_train_dataset_sizes(
self, data_param_list, datasets, epoch, shard_epoch=None
):
num_shards = [
self.get_split_num_data_shards(param["split"])[param["key"]]
for param in data_param_list
]
data_sizes = []
for (key, d), num_shard in zip(datasets, num_shards):
my_data_sizes = self._training_data_sizes[key]
shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch)
if shard_ind not in my_data_sizes:
my_data_sizes[shard_ind] = len(d)
known_size = max(my_data_sizes.values())
data_sizes.append(
# If we don't know the data size of the shard yet,
                # use the max known data size to approximate.
# Note that we preprocess shards by a designated shard size
# and put any remaining data at the end into the last shard so
# the max shard size approximation is almost correct before loading
# the last shard; after loading the last shard, it will have the
# exact data sizes of the whole data size.
(key, sum(my_data_sizes.get(i, known_size) for i in range(num_shard)))
)
logger.info(
f"estimated total data sizes of all shards used in sampling ratios: {data_sizes}. "
"Note that if the data a shard has not been loaded yet, use the max known data size to approximate"
)
return [s for _, s in data_sizes]
def get_train_sampling_ratios(
self, data_param_list, datasets, epoch=1, shard_epoch=None
):
data_sizes = self.get_train_dataset_sizes(
data_param_list, datasets, epoch, shard_epoch
)
sampling_func = self.sampling_method.sampling_method_selector()
sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None
return sample_ratios
def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None):
if self.args.sampling_weights_from_file:
weights = load_sampling_weights(self.args.sampling_weights_from_file)
sample_ratios = [weights[k] for k, _ in datasets]
logger.info(
"| ignoring --sampling-weights when loadding sampling weights "
f"from file {self.args.sampling_weights_from_file}"
)
elif self.args.sampling_weights:
sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets]
else:
sample_ratios = self.get_train_sampling_ratios(
data_param_list, datasets, epoch, shard_epoch
)
if sample_ratios is not None:
logger.info(
"| Upsample ratios: {}".format(
list(zip(map(lambda x: x["key"], data_param_list), sample_ratios))
)
)
assert len(sample_ratios) == len(datasets)
return sample_ratios
def load_split_datasets(
self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs
):
data_param_list = self.get_split_data_param_list(
split, epoch, shard_epoch=shard_epoch
)
langpairs_sharing_datasets = (
{} if self.args.enable_reservsed_directions_shared_datasets else None
)
datasets = [
(
param["key"],
self.load_a_dataset(
combine=combine,
langpairs_sharing_datasets=langpairs_sharing_datasets,
**param,
),
)
for param in data_param_list
]
return datasets, data_param_list
def load_into_concat_dataset(self, split, datasets, data_param_list):
if self.args.lang_tok_replacing_bos_eos:
# TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset
return SampledMultiDataset(
OrderedDict(datasets),
sampling_ratios=None,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=None,
split=split,
)
return ConcatDataset([d for _, d in datasets])
def load_sampled_multi_epoch_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs
):
datasets, data_param_list = self.load_split_datasets(
split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs
)
if training and split == getattr(self.args, "train_subset", None):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiEpochDataset(
OrderedDict(datasets),
epoch=epoch,
shard_epoch=shard_epoch,
                # valid and test datasets will degenerate to concatenated datasets:
sampling_ratios=sample_ratios,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=self.args.virtual_data_size,
split=split,
virtual_epoch_size=self.args.virtual_epoch_size,
# if not using lang_tok altering, simplified to use the same collater
shared_collater=self._shared_collater(),
)
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
def load_sampled_multi_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs
):
datasets, data_param_list = self.load_split_datasets(
split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs
)
if training and split == getattr(self.args, "train_subset", None):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiDataset(
OrderedDict(datasets),
epoch=epoch,
                # valid and test datasets will degenerate to concatenated datasets:
sampling_ratios=sample_ratios,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=self.args.virtual_data_size,
split=split,
# if not using lang_tok altering, simplified to use the same collater
shared_collater=self._shared_collater(),
)
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
def load_dataset(
self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs
):
if self.args.virtual_epoch_size is None:
return self.load_sampled_multi_dataset(
split, training, epoch, combine, shard_epoch, **kwargs
)
else:
return self.load_sampled_multi_epoch_dataset(
split, training, epoch, combine, shard_epoch, **kwargs
)
|
COCO-LM/fairseq/fairseq/data/multilingual/multilingual_data_manager.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multilingual/multilingual_data_manager.py",
"repo_id": "COCO-LM",
"token_count": 23039
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from fairseq.data import BaseWrapperDataset, plasma_utils
logger = logging.getLogger(__name__)
class ResamplingDataset(BaseWrapperDataset):
"""Randomly samples from a given dataset at each epoch.
Sampling is done with or without replacement, depending on the "replace"
parameter.
Optionally, the epoch size can be rescaled. This is potentially desirable
to increase per-epoch coverage of the base dataset (since sampling with
replacement means that many items in the dataset will be left out). In the
case of sampling without replacement, size_ratio should be strictly less
than 1.
Args:
dataset (~torch.utils.data.Dataset): dataset on which to sample.
weights (List[float]): list of probability weights
(default: None, which corresponds to uniform sampling).
replace (bool): sampling mode; True for "with replacement", or False
for "without replacement" (default: True)
size_ratio (float): the ratio to subsample to; must be positive
(default: 1.0).
batch_by_size (bool): whether or not to batch by sequence length
(default: True).
seed (int): RNG seed to use (default: 0).
epoch (int): starting epoch number (default: 1).
"""
def __init__(
self,
dataset,
weights=None,
replace=True,
size_ratio=1.0,
batch_by_size=True,
seed=0,
epoch=1,
):
super().__init__(dataset)
if weights is None:
self.weights = None
else:
assert len(weights) == len(dataset)
weights_arr = np.array(weights, dtype=np.float64)
weights_arr /= weights_arr.sum()
self.weights = plasma_utils.PlasmaArray(weights_arr)
self.replace = replace
assert size_ratio > 0.0
if not self.replace:
assert size_ratio < 1.0
self.size_ratio = float(size_ratio)
self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)
self.batch_by_size = batch_by_size
self.seed = seed
self._cur_epoch = None
self._cur_indices = None
self.set_epoch(epoch)
def __getitem__(self, index):
return self.dataset[self._cur_indices.array[index]]
def __len__(self):
return self.actual_size
@property
def sizes(self):
if isinstance(self.dataset.sizes, list):
return [s[self._cur_indices.array] for s in self.dataset.sizes]
return self.dataset.sizes[self._cur_indices.array]
def num_tokens(self, index):
return self.dataset.num_tokens(self._cur_indices.array[index])
def size(self, index):
return self.dataset.size(self._cur_indices.array[index])
def ordered_indices(self):
if self.batch_by_size:
order = [
np.arange(len(self)),
self.sizes,
] # No need to handle `self.shuffle == True`
return np.lexsort(order)
else:
return np.arange(len(self))
def prefetch(self, indices):
self.dataset.prefetch(self._cur_indices.array[indices])
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch):
logger.debug("ResamplingDataset.set_epoch: {}".format(epoch))
super().set_epoch(epoch)
if epoch == self._cur_epoch:
return
self._cur_epoch = epoch
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
42, # magic number
self.seed % (2 ** 32), # global seed
self._cur_epoch, # epoch index
]
)
self._cur_indices = plasma_utils.PlasmaArray(
rng.choice(
len(self.dataset),
self.actual_size,
replace=self.replace,
p=(None if self.weights is None else self.weights.array),
)
)
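# Minimal usage sketch (illustrative; `base_dataset` stands in for any dataset
# exposing __len__, __getitem__ and `sizes`):
#
#     ds = ResamplingDataset(base_dataset, replace=False, size_ratio=0.5, seed=7)
#     ds.set_epoch(2)   # deterministically re-draws the 50% subsample for epoch 2
#     first = ds[0]     # indexes through the epoch's resampled index array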
|
COCO-LM/fairseq/fairseq/data/resampling_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/resampling_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1912
}
| 199 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from . import FairseqDataset
class TransformEosLangPairDataset(FairseqDataset):
"""A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on
collated samples of language pair dataset.
Note that the transformation is applied in :func:`collater`.
Args:
dataset (~fairseq.data.FairseqDataset): dataset that collates sample into
LanguagePairDataset schema
src_eos (int): original source end-of-sentence symbol index to be replaced
new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol
tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced
new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the
beginning of 'prev_output_tokens'
"""
def __init__(
self,
dataset: FairseqDataset,
src_eos: int,
new_src_eos: Optional[int] = None,
tgt_bos: Optional[int] = None,
new_tgt_bos: Optional[int] = None,
):
self.dataset = dataset
self.src_eos = src_eos
self.new_src_eos = new_src_eos
self.tgt_bos = tgt_bos
self.new_tgt_bos = new_tgt_bos
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples, **extra_args):
samples = self.dataset.collater(samples, **extra_args)
if self.new_src_eos is not None:
if self.dataset.left_pad_source:
assert (
samples["net_input"]["src_tokens"][:, -1] != self.src_eos
).sum() == 0
samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos
else:
eos_idx = samples["net_input"]["src_lengths"] - 1
assert (
samples["net_input"]["src_tokens"][
torch.arange(eos_idx.size(0)), eos_idx
]
!= self.src_eos
).sum() == 0
eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1)
samples["net_input"]["src_tokens"].scatter_(
1, eos_idx, self.new_src_eos
)
if (
self.new_tgt_bos is not None
and "prev_output_tokens" in samples["net_input"]
):
if self.dataset.left_pad_target:
# TODO: support different padding direction on target side
raise NotImplementedError(
"TransformEosLangPairDataset does not implement --left-pad-target True option"
)
else:
assert (
samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos
).sum() == 0
samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos
return samples
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
@property
def sizes(self):
        # dataset.sizes can be dynamically computed:
return self.dataset.sizes
def ordered_indices(self):
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices)
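# Minimal usage sketch (index values are assumed, not real vocabulary ids):
# wrap a language pair dataset so that the source eos (say index 2) is
# rewritten to a language token (say index 250) when batches are collated:
#
#     ds = TransformEosLangPairDataset(pair_ds, src_eos=2, new_src_eos=250)
#     batch = ds.collater([ds[i] for i in range(8)])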
|
COCO-LM/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1790
}
| 200 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
from typing import Dict, Optional
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(
b for b in cls.__bases__ if b != FairseqIncrementalState
)
return cls
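# Hedged usage sketch: decorating a module mixes FairseqIncrementalState into
# its bases, giving it uuid-namespaced get/set helpers so cached state from
# different module instances never collides in the shared dict:
#
#     @with_incremental_state
#     class MyAttention(nn.Module):  # nn.Module assumed imported
#         def forward(self, x, incremental_state=None):
#             saved = self.get_incremental_state(incremental_state, "prev_state")
#             ...
#             self.set_incremental_state(incremental_state, "prev_state", {"k": x})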
|
COCO-LM/fairseq/fairseq/incremental_decoding_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/incremental_decoding_utils.py",
"repo_id": "COCO-LM",
"token_count": 699
}
| 201 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
from fairseq.model_parallel.modules import (
ModelParallelTransformerDecoderLayer,
ModelParallelTransformerEncoderLayer,
)
from fairseq.models import register_model
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
VocabParallelEmbedding,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model("model_parallel_transformer")
class ModelParallelTransformerModel(TransformerModel):
"""
Model parallel Transformer model.
"""
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
dictionary.pad_to_multiple_(args.model_parallel_size * 8)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5)
nn.init.constant_(tensor[1], 0)
emb = VocabParallelEmbedding(
num_embeddings, embed_dim, padding_idx, init_method=_vocab_init
)
# if provided, load from preloaded dictionaries
if path:
raise NotImplementedError(
"Loading of embedding from path is not supported for model parallel"
)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return ModelParallelTransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return ModelParallelTransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
class ModelParallelTransformerEncoder(TransformerEncoder):
"""
Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerEncoderLayer`.
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if args.no_final_layer_norm:
self.layer_norm = None
def build_encoder_layer(self, args):
return ModelParallelTransformerEncoderLayer(args)
class ModelParallelTransformerDecoder(TransformerDecoder):
"""
Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerDecoderLayer`.
"""
def build_decoder_layer(self, args, no_encoder_attn=False):
return ModelParallelTransformerDecoderLayer(args, no_encoder_attn)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if not self.share_input_output_embed:
raise NotImplementedError(
"Model parallel training currently requires --share-decoder-input-output-embed"
)
features = copy_to_model_parallel_region(features)
# project back to size of vocabulary
x = self.output_projection(features)
if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy":
x = gather_from_model_parallel_region(x).contiguous()
return x
|
COCO-LM/fairseq/fairseq/model_parallel/models/transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/models/transformer.py",
"repo_id": "COCO-LM",
"token_count": 1575
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various fairseq models.
"""
import logging
from argparse import Namespace
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
gen_parser_from_dataclass,
)
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig
from torch import Tensor
logger = logging.getLogger(__name__)
def check_type(module, expected_type):
if hasattr(module, "unwrapped_module"):
assert isinstance(module.unwrapped_module, expected_type), \
f"{type(module.unwrapped_module)} != {expected_type}"
else:
assert isinstance(module, expected_type), f"{type(module)} != {expected_type}"
class BaseFairseqModel(nn.Module):
"""Base class for fairseq models."""
def __init__(self):
super().__init__()
self._is_generation_fast = False
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
            # do not set defaults so that setting defaults from various architectures still works
gen_parser_from_dataclass(parser, dc(), delete_default=True)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
raise NotImplementedError("Model must implement the build_model method")
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample["target"]
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support the super() method, so a scriptable subclass
    # can't access the base class implementation in TorchScript. The current
    # workaround is to add a helper function with a different name and call
    # that helper from the scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Scriptable helper function for get_normalized_probs in ~BaseFairseqModel"""
if hasattr(self, "decoder"):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
# syntactic sugar for simple models which don't have a decoder
# (e.g., the classification tutorial)
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def extract_features(self, *args, **kwargs):
"""Similar to *forward* but only return features."""
return self(*args, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return None
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
return super().load_state_dict(new_state_dict, strict)
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
self.upgrade_state_dict_named(state_dict, "")
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.
Args:
state_dict (dict): state dictionary to upgrade, in place
name (str): the state dict key corresponding to the current module
"""
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += "."
for n, c in m.named_children():
name = prefix + n
if hasattr(c, "upgrade_state_dict_named"):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, "upgrade_state_dict"):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
def prepare_for_inference_(self, cfg: DictConfig):
"""Prepare model for inference."""
kwargs = {}
kwargs["beamable_mm_beam_size"] = (
None
if getattr(cfg.generation, "no_beamable_mm", False)
else getattr(cfg.generation, "beam", 5)
)
kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False)
if getattr(cfg.generation, "retain_dropout", False):
kwargs["retain_dropout"] = cfg.generation.retain_dropout
kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules
self.make_generation_fast_(**kwargs)
def make_generation_fast_(self, **kwargs):
"""
Legacy entry point to optimize model for faster generation.
Prefer prepare_for_inference_.
"""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except (AttributeError, ValueError): # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module, prefix):
if len(prefix) > 0:
prefix += "."
base_func = BaseFairseqModel.make_generation_fast_
for n, m in module.named_modules():
if (
m != self
and hasattr(m, "make_generation_fast_")
# don't call this implementation again, e.g., if
# children modules also inherit from BaseFairseqModel
and m.make_generation_fast_.__func__ is not base_func
):
name = prefix + n
m.make_generation_fast_(name=name, **kwargs)
apply_make_generation_fast_(self, "")
def train(mode=True):
if mode:
raise RuntimeError("cannot train after make_generation_fast")
# this model should no longer be used for training
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
"""Make model exportable via ONNX trace."""
seen = set()
def apply_prepare_for_onnx_export_(module):
if (
module != self
and hasattr(module, "prepare_for_onnx_export_")
and module not in seen
):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"])
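    # Hedged usage sketch (paths and file names are illustrative):
    #
    #     model = TransformerModel.from_pretrained(
    #         "/path/to/archive",
    #         checkpoint_file="model.pt",
    #         data_name_or_path=".",
    #     )
    #     # `model` is a GeneratorHubInterface; the underlying FairseqModel
    #     # instances are available via model.models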
@classmethod
def hub_models(cls):
return {}
class FairseqEncoderDecoderModel(BaseFairseqModel):
"""Base class for encoder-decoder models.
Args:
encoder (FairseqEncoder): the encoder
decoder (FairseqDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
check_type(self.encoder, FairseqEncoder)
check_type(self.decoder, FairseqDecoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
class FairseqModel(FairseqEncoderDecoderModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
utils.deprecation_warning(
"FairseqModel is deprecated, please use FairseqEncoderDecoderModel "
"or BaseFairseqModel instead",
stacklevel=4,
)
class FairseqMultiModel(BaseFairseqModel):
"""Base class for combining multiple encoder-decoder models."""
def __init__(self, encoders, decoders):
super().__init__()
assert encoders.keys() == decoders.keys()
self.keys = list(encoders.keys())
for key in self.keys:
check_type(encoders[key], FairseqEncoder)
check_type(decoders[key], FairseqDecoder)
self.models = nn.ModuleDict(
{
key: FairseqEncoderDecoderModel(encoders[key], decoders[key])
for key in self.keys
}
)
@staticmethod
def build_shared_embeddings(
dicts: Dict[str, Dictionary],
langs: List[str],
embed_dim: int,
build_embedding: callable,
pretrained_embed_path: Optional[str] = None,
):
"""
Helper function to build shared embeddings for a set of languages after
checking that all dicts corresponding to those languages are equivalent.
Args:
dicts: Dict of lang_id to its corresponding Dictionary
langs: languages that we want to share embeddings for
embed_dim: embedding dimension
build_embedding: callable function to actually build the embedding
pretrained_embed_path: Optional path to load pretrained embeddings
"""
shared_dict = dicts[langs[0]]
if any(dicts[lang] != shared_dict for lang in langs):
raise ValueError(
"--share-*-embeddings requires a joined dictionary: "
"--share-encoder-embeddings requires a joined source "
"dictionary, --share-decoder-embeddings requires a joined "
"target dictionary, and --share-all-embeddings requires a "
"joint source + target dictionary."
)
return build_embedding(shared_dict, embed_dim, pretrained_embed_path)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
raise NotImplementedError
def max_positions(self):
"""Maximum length supported by the model."""
return {
key: (
self.models[key].encoder.max_positions(),
self.models[key].decoder.max_positions(),
)
for key in self.keys
}
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return min(model.decoder.max_positions() for model in self.models.values())
@property
def encoder(self):
return self.models[self.keys[0]].encoder
@property
def decoder(self):
return self.models[self.keys[0]].decoder
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg=None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
return super().load_state_dict(new_state_dict, strict)
class FairseqLanguageModel(BaseFairseqModel):
"""Base class for decoder-only models.
Args:
decoder (FairseqDecoder): the decoder
"""
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
check_type(self.decoder, FairseqDecoder)
def forward(self, src_tokens, **kwargs):
"""
Run the forward pass for a decoder-only model.
Feeds a batch of tokens through the decoder to predict the next tokens.
Args:
src_tokens (LongTensor): tokens on which to condition the decoder,
of shape `(batch, tgt_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
Returns:
tuple:
- the decoder's output of shape `(batch, seq_len, vocab)`
- a dictionary with any model-specific outputs
"""
return self.decoder(src_tokens, **kwargs)
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, seq_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
return self.decoder.extract_features(src_tokens, **kwargs)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return self.decoder.max_positions()
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
@property
def supported_targets(self):
return {"future"}
class FairseqEncoderModel(BaseFairseqModel):
"""Base class for encoder-only models.
Args:
encoder (FairseqEncoder): the encoder
"""
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
check_type(self.encoder, FairseqEncoder)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
Run the forward pass for a encoder-only model.
Feeds a batch of tokens through the encoder to generate features.
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
Returns:
the encoder's output, typically of shape `(batch, src_len, features)`
"""
return self.encoder(src_tokens, src_lengths, **kwargs)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
encoder_out = net_output["encoder_out"]
if torch.is_tensor(encoder_out):
logits = encoder_out.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def max_positions(self):
"""Maximum length supported by the model."""
return self.encoder.max_positions()
|
COCO-LM/fairseq/fairseq/models/fairseq_model.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/fairseq_model.py",
"repo_id": "COCO-LM",
"token_count": 8781
}
| 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import (
FairseqNATModel,
LevenshteinTransformerDecoder,
LevenshteinTransformerModel,
ensemble_decoder,
)
from fairseq.models.transformer import Linear
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange
class NegativeDistanceScore(object):
def __init__(self):
# pre-compute some values
self.scores = {}
self.scores[0.5] = self.compute_score_full(50, 0.5)
self.scores[1.0] = self.compute_score_full(50, 1.0)
self.scores[2.0] = self.compute_score_full(50, 2.0)
def __call__(self, i, L, tau):
if (tau is None) or (tau > 1000):
return 1 / L
if tau in self.scores:
if L < self.scores[tau].shape[0]:
return self.scores[tau][L - 1, i]
return self.compute_score(L, tau)[i]
def compute_score(self, L, tau):
s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
s = np.exp(s - s.max())
return s / s.sum()
def compute_score_full(self, L, tau):
s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
s = np.exp(s - s.max(1, keepdims=True))
return s / s.sum(1, keepdims=True)
neg_scorer = NegativeDistanceScore()
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
try:
from fairseq import libnat
except ImportError as e:
import sys
sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n")
raise e
B = in_tokens.size(0)
T = in_tokens.size(1)
V = vocab_size
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
insert_labels = [a[:-1] for a in full_labels]
        # numericalize the insertion labels
insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
insert_index, insert_labels = zip(
*[
(w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
for i, labels in enumerate(insert_labels)
for j, label in enumerate(labels[1:-1])
for k, w in enumerate(label)
]
        )  # HACK: labels[1:-1] skips the bos/eos insertion slots
insert_index, insert_labels = [
torch.tensor(list(a), device=in_tokens.device)
for a in [insert_index, insert_labels]
]
insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
insert_label_tensors = insert_label_tensors.view(B, T - 1, V)
return insert_label_tensors
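# Shape note: the returned tensor is (B, T - 1, V) -- one soft distribution
# over the vocabulary per insertion slot between adjacent tokens of in_tokens,
# built from the (optionally tau-smoothed) negative-distance scores above.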
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
padding_masks = in_tokens[:, 1:].eq(padding_idx)
word_ins_scores.masked_fill_(padding_masks, 0.0)
word_ins_pred.masked_fill_(padding_masks, padding_idx)
in_coords = new_arange(in_tokens).type_as(in_scores)
    # shift all padding predictions to infinity so they sort to the end
out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
word_ins_pred.eq(padding_idx), float("inf")
)
out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
return out_tokens, out_scores
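# How the merge works: each predicted word gets a fractional coordinate (slot j
# maps to j - 0.5, i.e. between original tokens j - 1 and j), padding
# predictions are pushed to +inf, and a single sort over the concatenated
# coordinates yields the token order with all insertions applied and padding
# gathered at the end.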
@register_model("insertion_transformer")
class InsertionTransformerModel(LevenshteinTransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument("--label-tau", default=None, type=float)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
word_ins_out = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt = _get_ins_targets(
prev_output_tokens,
tgt_tokens,
self.pad,
self.unk,
len(self.tgt_dict),
tau=self.decoder.label_tau,
).type_as(word_ins_out)
word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
}
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# TODO: decoding for InsertionTransformer
word_ins_score = self.decoder.forward_word_ins(
normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
if eos_penalty > 0.0:
word_ins_score[:, :, self.pad] -= eos_penalty
word_ins_score, word_ins_pred = word_ins_score.max(-1)
output_tokens, output_scores = _apply_ins_words(
output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# use the TransformerDecoder's __init__
super(LevenshteinTransformerDecoder, self).__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim)
self.label_tau = getattr(args, "label_tau", None)
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens):
features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0]
features = self.pool_out(
torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
def forward_mask_ins(self, *args, **kwargs):
raise NotImplementedError
def forward_word_del(self, *args, **kwargs):
raise NotImplementedError
@register_model_architecture("insertion_transformer", "insertion_transformer")
def insertion_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# special for insertion transformer
args.label_tau = getattr(args, "label_tau", None)
|
COCO-LM/fairseq/fairseq/models/nat/insertion_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/insertion_transformer.py",
"repo_id": "COCO-LM",
"token_count": 4742
}
| 204 |
#!/usr/bin/env python3
import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer
from torch import Tensor
logger = logging.getLogger(__name__)
@register_model("convtransformer")
class ConvTransformerModel(FairseqEncoderDecoderModel):
"""
Transformer-based Speech translation model from ESPNet-ST
https://arxiv.org/abs/2004.10234
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--decoder-output-dim",
type=int,
metavar="N",
help="decoder output dimension (extra linear layer if different from decoder embed dim)",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
parser.add_argument(
"--conv-out-channels",
type=int,
metavar="INT",
help="the number of output channels of conv layer",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
@staticmethod
@torch.jit.unused
def set_batch_first(lprobs):
lprobs.batch_first = True
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
if self.training:
self.set_batch_first(lprobs)
return lprobs
def output_layout(self):
return "BTD"
"""
The forward method inherited from the base class has a **kwargs argument in
its input, which is not supported in torchscript. This method overrites the forward
method definition without **kwargs.
"""
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
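# --- Illustrative sketch (not part of the original file) --------------------
# A minimal eager forward pass, assuming a model already built via
# build_model(args, task). Shapes are assumptions: 80-dim filterbank frames,
# batch of 2, 100 frames, 5 target tokens; `torch` is imported at module top.
def _demo_forward(model):
    src_tokens = torch.randn(2, 100, 80)                       # (B, T, C) features
    src_lengths = torch.full((2,), 100, dtype=torch.long)      # frames per utterance
    prev_output_tokens = torch.zeros(2, 5, dtype=torch.long)   # shifted targets
    return model(src_tokens, src_lengths, prev_output_tokens)  # decoder logits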
class ConvTransformerEncoder(FairseqEncoder):
"""Conv + Transformer encoder"""
def __init__(self, args):
"""Construct an Encoder object."""
super().__init__(None)
self.dropout = args.dropout
self.embed_scale = (
1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim)
)
self.padding_idx = 1
self.in_channels = 1
self.input_dim = args.input_feat_per_channel
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2),
torch.nn.ReLU(),
torch.nn.Conv2d(
args.conv_out_channels,
args.conv_out_channels,
3,
stride=2,
padding=3 // 2,
),
torch.nn.ReLU(),
)
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim, args.conv_out_channels
)
self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
self.padding_idx,
learned=False,
)
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
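    # Two stride-2 convolutions reduce the time axis by a factor of 4.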
def pooling_ratio(self):
return 4
def infer_conv_output_dim(self, in_channels, input_dim, out_channels):
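        # Probe the conv stack with a dummy batch to infer the flattened
        # feature dim. Worked example: input_dim=80 through two stride-2,
        # padding-1 convs gives 80 -> 40 -> 20 on the feature axis, so the
        # result is out_channels * 20 (512 * 20 = 10240 with base defaults).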
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x)
x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def forward(self, src_tokens, src_lengths):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
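        # Approximate the post-convolution lengths by the observed subsampling
        # factor, clamping to the output length so the padding mask can never
        # exceed the tensor's time dimension.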
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
input_lengths = torch.min(
(src_lengths.float() / subsampling_factor).ceil().long(),
x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long()
)
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if not encoder_padding_mask.any():
maybe_encoder_padding_mask = None
else:
maybe_encoder_padding_mask = encoder_padding_mask
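        # Outputs follow fairseq's encoder-out convention: encoder_out is
        # (T', B, D); the padding mask, when any padding exists, is (B, T').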
return {
"encoder_out": [x],
"encoder_padding_mask": [maybe_encoder_padding_mask]
if maybe_encoder_padding_mask is not None
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
(encoder_out["encoder_padding_mask"][0]).index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
(encoder_out["encoder_embedding"][0]).index_select(0, new_order)
]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out,
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding,
"encoder_states": encoder_states,
"src_tokens": [],
"src_lengths": [],
}
class TransformerDecoderNoExtra(TransformerDecoder):
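    """TransformerDecoder variant whose extract_features drops the attention
    extras so the return type stays TorchScript-friendly."""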
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="convtransformer", arch_name="convtransformer")
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.max_source_positions = getattr(args, "max_source_positions", 3000)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim)
@register_model_architecture("convtransformer", "convtransformer_espnet")
def convtransformer_espnet(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
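# Example invocation (hypothetical data path; flags are the ones registered in
# add_args above, with fairseq's speech_to_text task):
#   fairseq-train ${S2T_DATA_DIR} --task speech_to_text \
#       --arch convtransformer_espnet --encoder-layers 12 --decoder-layers 6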
|
COCO-LM/fairseq/fairseq/models/speech_to_text/convtransformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/speech_to_text/convtransformer.py",
"repo_id": "COCO-LM",
"token_count": 7742
}
| 205 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from fairseq.modules.quant_noise import quant_noise
from torch import nn
class AdaptiveInput(nn.Module):
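    """Adaptive input embeddings (Baevski & Auli, 2019): token ids are split
    into frequency bands at `cutoff`; band i uses an embedding of size
    initial_dim // factor**i, projected to a shared output_dim."""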
def __init__(
self,
vocab_size: int,
padding_idx: int,
initial_dim: int,
factor: float,
output_dim: int,
cutoff: List[int],
q_noise: float = 0,
qn_block_size: int = 8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert (
vocab_size == cutoff[-1]
), "cannot specify cutoff larger than vocab size"
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
for i in range(len(self.cutoff)):
prev = self.cutoff[i - 1] if i > 0 else 0
size = self.cutoff[i] - prev
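            # Each later (rarer) band shrinks its embedding dim by `factor`;
            # the quant-noised Linear below projects it back to output_dim.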
dim = int(initial_dim // (factor ** i))
seq = nn.Sequential(
nn.Embedding(size, dim, self.padding_idx),
quant_noise(
nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size
),
)
self.embeddings.append(seq)
        self.padding_idx = padding_idx
def init_weights(m):
if isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
elif hasattr(m, "weight"):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
def weights_for_band(self, band: int):
return self.embeddings[band][0].weight, self.embeddings[band][1].weight
def forward(self, input: torch.Tensor):
result = self._float_tensor.new(input.shape + (self.embedding_dim,))
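        # Fill `result` one band at a time: `mask` selects ids in
        # [cutoff[i-1], cutoff[i]) and shifts them to index that band's table.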
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if i > 0:
mask.mul_(input.ge(self.cutoff[i - 1]))
chunk_input = input[mask] - self.cutoff[i - 1]
else:
chunk_input = input[mask]
if mask.any():
result[mask] = self.embeddings[i](chunk_input)
return result
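# --- Illustrative sketch (not part of the original file) --------------------
# Minimal usage under assumed numbers: a 50k vocab with cutoffs [10000, 40000]
# (a final 50000 cutoff is appended automatically), so band dims are
# 1024, 256, 64 before projection back to 1024.
#   emb = AdaptiveInput(vocab_size=50000, padding_idx=1, initial_dim=1024,
#                       factor=4.0, output_dim=1024, cutoff=[10000, 40000])
#   tokens = torch.randint(0, 50000, (8, 32))
#   out = emb(tokens)   # -> (8, 32, 1024)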
|
COCO-LM/fairseq/fairseq/modules/adaptive_input.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/adaptive_input.py",
"repo_id": "COCO-LM",
"token_count": 1290
}
| 206 |