text (string, 5–22M chars) | id (string, 12–177 chars) | metadata (dict) | __index_level_0__ (int64, 0–1.37k)
---|---|---|---|
from fastapi import HTTPException, status
from models.schemas.workspace import AuthProvider
from resources import strings
from services.aad_authentication import AzureADAuthorization
from services.access_service import AccessService, AuthConfigValidationError
def extract_auth_information(workspace_creation_properties: dict) -> dict:
access_service = get_access_service('AAD')
try:
return access_service.extract_workspace_auth_information(workspace_creation_properties)
except AuthConfigValidationError as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
def get_access_service(provider: str = AuthProvider.AAD) -> AccessService:
if provider == AuthProvider.AAD:
return AzureADAuthorization()
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.INVALID_AUTH_PROVIDER)
get_current_tre_user = AzureADAuthorization(require_one_of_roles=['TREUser'])
get_current_admin_user = AzureADAuthorization(require_one_of_roles=['TREAdmin'])
get_current_tre_user_or_tre_admin = AzureADAuthorization(require_one_of_roles=['TREUser', 'TREAdmin'])
get_current_workspace_owner_user = AzureADAuthorization(require_one_of_roles=['WorkspaceOwner'])
get_current_workspace_researcher_user = AzureADAuthorization(require_one_of_roles=['WorkspaceResearcher'])
get_current_airlock_manager_user = AzureADAuthorization(require_one_of_roles=['AirlockManager'])
get_current_workspace_owner_or_researcher_user = AzureADAuthorization(require_one_of_roles=['WorkspaceOwner', 'WorkspaceResearcher'])
get_current_workspace_owner_or_airlock_manager = AzureADAuthorization(require_one_of_roles=['WorkspaceOwner', 'AirlockManager'])
get_current_workspace_owner_or_researcher_user_or_airlock_manager = AzureADAuthorization(require_one_of_roles=['WorkspaceOwner', 'WorkspaceResearcher', 'AirlockManager'])
get_current_workspace_owner_or_researcher_user_or_tre_admin = AzureADAuthorization(require_one_of_roles=["TREAdmin", "WorkspaceOwner", "WorkspaceResearcher"])
get_current_workspace_owner_or_researcher_user_or_airlock_manager_or_tre_admin = AzureADAuthorization(require_one_of_roles=["TREAdmin", "WorkspaceOwner", "WorkspaceResearcher", "AirlockManager"])
get_current_workspace_owner_or_tre_admin = AzureADAuthorization(require_one_of_roles=["TREAdmin", "WorkspaceOwner"])
|
AzureTRE/api_app/services/authentication.py/0
|
{
"file_path": "AzureTRE/api_app/services/authentication.py",
"repo_id": "AzureTRE",
"token_count": 792
}
| 94 |
import pytest
from mock import patch
from fastapi import HTTPException
from db.errors import UnableToAccessDatabase
from db.repositories.base import BaseRepository
from api.helpers import get_repository
pytestmark = pytest.mark.asyncio
@patch("db.repositories.base.BaseRepository.create")
async def test_get_repository_raises_http_exception_when_unable_to_access_database(create_base_repo_mock):
create_base_repo_mock.side_effect = UnableToAccessDatabase()
with pytest.raises(HTTPException):
get_repo = get_repository(BaseRepository)
await get_repo()
|
AzureTRE/api_app/tests_ma/test_api/test_helpers.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_api/test_helpers.py",
"repo_id": "AzureTRE",
"token_count": 202
}
| 95 |
from unittest.mock import AsyncMock, MagicMock, patch
from azure.core.exceptions import AzureError
import pytest
from db import events
pytestmark = pytest.mark.asyncio
@patch("db.events.get_credential")
@patch("db.events.CosmosDBManagementClient")
async def test_bootstrap_database_success(cosmos_db_mgmt_client_mock, get_credential_async_context_mock):
get_credential_async_context_mock.return_value = AsyncMock()
cosmos_db_mgmt_client_mock.return_value = MagicMock()
result = await events.bootstrap_database()
assert result is True
@patch("db.events.get_credential")
@patch("db.events.CosmosDBManagementClient")
async def test_bootstrap_database_failure(cosmos_db_mgmt_client_mock, get_credential_async_context_mock):
get_credential_async_context_mock.return_value = AsyncMock()
cosmos_db_mgmt_client_mock.side_effect = AzureError("some error")
result = await events.bootstrap_database()
assert result is False
|
AzureTRE/api_app/tests_ma/test_db/test_events.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_db/test_events.py",
"repo_id": "AzureTRE",
"token_count": 341
}
| 96 |
import pytest
from models.domain.request_action import RequestAction
from models.domain.resource import Resource, ResourceType
from models.domain.user_resource import UserResource
from models.domain.workspace_service import WorkspaceService
OPERATION_ID = "0000c8e7-5c42-4fcb-a7fd-294cfc27aa76"
STEP_ID = "main"
@pytest.mark.parametrize('resource, expected', [
# enabled = True
(Resource(templateName="", templateVersion="", isEnabled=True, etag="", properties={}, id="1234", resourceType=ResourceType.Workspace, resourcePath="test"), True),
# enabled = False
(Resource(templateName="", templateVersion="", isEnabled=False, etag="", properties={}, id="1234", resourceType=ResourceType.Workspace, resourcePath="test"), False),
# enabled not set - defaults to True
(Resource(templateName="", templateVersion="", properties={}, id="1234", etag="", resourceType=ResourceType.Workspace, resourcePath="test"), True),
])
def test_resource_is_enabled_returns_correct_value(resource, expected):
assert resource.isEnabled == expected
def test_user_resource_get_resource_request_message_payload_augments_payload_with_extra_params():
owner_id = "abc"
workspace_id = "123"
parent_service_id = "abcdef"
user_resource = UserResource(id="123", templateName="user-template", templateVersion="1.0", etag="", ownerId=owner_id, workspaceId=workspace_id, parentWorkspaceServiceId=parent_service_id, resourcePath="test")
message_payload = user_resource.get_resource_request_message_payload(OPERATION_ID, STEP_ID, RequestAction.Install)
assert message_payload["workspaceId"] == workspace_id
assert message_payload["ownerId"] == owner_id
assert message_payload["parentWorkspaceServiceId"] == parent_service_id
def test_workspace_service_get_resource_request_message_payload_augments_payload_with_extra_params():
workspace_id = "123"
workspace_service = WorkspaceService(id="123", templateName="service-template", templateVersion="1.0", etag="", workspaceId=workspace_id, resourcePath="test")
message_payload = workspace_service.get_resource_request_message_payload(OPERATION_ID, STEP_ID, RequestAction.Install)
assert message_payload["workspaceId"] == workspace_id
|
AzureTRE/api_app/tests_ma/test_models/test_resource.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_models/test_resource.py",
"repo_id": "AzureTRE",
"token_count": 688
}
| 97 |
### Create a workspace (admin)
POST {{baseUrl}}/workspaces
Accept: {{contentType}}
Authorization: Bearer {{token}}
Content-Type: {{contentType}}
{
"templateName": "{{workspaceTemplate}}",
"properties": {
"display_name": "my workspace",
"description": "my workspace",
"client_id": "{{clientId}}",
"vm_size": "Standard_A1",
"no_of_vms": 2
}
}
### Create a workspace service (workspace owner)
POST {{baseUrl}}/workspaces/{{workspaceId}}/workspace-services
Accept: {{contentType}}
Authorization: Bearer {{token}}
Content-Type: {{contentType}}
{
"templateName": "{{workspaceServiceTemplate}}",
"properties": {
"display_name": "my workspace service",
"description": "my workspace service"
}
}
### Create a user resource (workspace researcher)
POST {{baseUrl}}/workspaces/{{workspaceId}}/workspace-services/{{workspaceServiceId}}/user-resources
Accept: {{contentType}}
Authorization: Bearer {{token}}
Content-Type: {{contentType}}
{
"templateName": "{{userResourceTemplate}}",
"properties": {
"display_name": "my user resource",
"description": "my user resource"
}
}
### Disable a workspace (admin)
PATCH {{baseUrl}}/workspaces/{{workspaceId}}
Accept: {{contentType}}
Authorization: Bearer {{token}}
Content-Type: {{contentType}}
{
"enabled": false
}
### Delete a workspace (admin)
DELETE {{baseUrl}}/workspaces/{{workspaceId}}
Accept: {{contentType}}
Authorization: Bearer {{token}}
### Disable a workspace service (workspace owner)
PATCH {{baseUrl}}/workspaces/{{workspaceId}}/workspace-services/{{workspaceServiceId}}
Accept: {{contentType}}
Authorization: Bearer {{token}}
Content-Type: {{contentType}}
{
"enabled": false
}
### Delete a workspace service (workspace owner)
DELETE {{baseUrl}}/workspaces/{{workspaceId}}/workspace-services/{{workspaceServiceId}}
Accept: {{contentType}}
Authorization: Bearer {{token}}
|
AzureTRE/api_http_requests/API Resource Modifying Endpoints.http/0
|
{
"file_path": "AzureTRE/api_http_requests/API Resource Modifying Endpoints.http",
"repo_id": "AzureTRE",
"token_count": 611
}
| 98 |
import json
import click
import logging
from tre.api_client import ApiClient
from tre.commands.operation import operation_show
from tre.output import output, output_option, query_option
from .contexts import pass_shared_service_context, SharedServiceContext
from .operation import shared_service_operation
from .operations import shared_service_operations
def shared_service_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str):
log = logging.getLogger(__name__)
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', '/api/shared-services')
if response.is_success:
ids = [shared_service["id"] for shared_service in response.json()["sharedServices"]]
return [id for id in ids if id.startswith(incomplete)]
@click.group(invoke_without_command=True, help="Perform actions on an individual shared_service")
@click.argument('shared_service_id', required=True, type=click.UUID, shell_complete=shared_service_id_completion)
@click.pass_context
def shared_service(ctx: click.Context, shared_service_id: str) -> None:
ctx.obj = SharedServiceContext(shared_service_id)
@click.command(name="show", help="Show a shared_service")
@output_option()
@query_option()
@pass_shared_service_context
def shared_service_show(shared_service_context: SharedServiceContext, output_format, query):
log = logging.getLogger(__name__)
shared_service_id = shared_service_context.shared_service_id
if shared_service_id is None:
raise click.UsageError('Missing shared_service ID')
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', f'/api/shared-services/{shared_service_id}', )
output(response, output_format=output_format, query=query, default_table_query=r"sharedService.{id:id,name:templateName, version:templateVersion, is_enabled:isEnabled, status: deploymentStatus}")
@click.command(name="invoke-action", help="Invoke an action on a shared_service")
@click.argument('action-name', required=True)
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@click.pass_context
@pass_shared_service_context
def shared_service_invoke_action(shared_service_context: SharedServiceContext, ctx: click.Context, action_name, no_wait, output_format, query):
log = logging.getLogger(__name__)
shared_service_id = shared_service_context.shared_service_id
if shared_service_id is None:
raise click.UsageError('Missing shared_service ID')
client = ApiClient.get_api_client_from_config()
click.echo(f"Invoking action {action_name}...\n", err=True)
response = client.call_api(
log,
'POST',
f'/api/shared-services/{shared_service_id}/invoke-action?action={action_name}'
)
if no_wait:
output(response, output_format=output_format, query=query)
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query)
@click.command(name="update", help="Update a shared service")
@click.option('--etag',
help='The etag of the shared service to update',
required=True)
@click.option('--definition', help='JSON definition for the shared service', required=False)
@click.option('--definition-file', help='File containing JSON definition for the workspace', required=False, type=click.File("r"))
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@click.pass_context
@pass_shared_service_context
def shared_service_update(shared_service_context: SharedServiceContext, ctx: click.Context, etag, definition, definition_file, no_wait, output_format, query, suppress_output: bool = False):
log = logging.getLogger(__name__)
shared_service_id = shared_service_context.shared_service_id
if shared_service_id is None:
raise click.UsageError('Missing shared_service ID')
if definition is None:
if definition_file is None:
raise click.UsageError('Please specify either a definition or a definition file')
definition = definition_file.read()
definition_dict = json.loads(definition)
client = ApiClient.get_api_client_from_config()
response = client.call_api(
log,
'PATCH',
f'/api/shared-services/{shared_service_id}',
headers={'etag': etag},
json_data=definition_dict)
if no_wait:
output(response, output_format=output_format, query=query, default_table_query=r"sharedService.{id:id,name:templateName, version:templateVersion, is_enabled:isEnabled, status: deploymentStatus}")
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output)
@click.command(name="set-enabled", help="Enable/disable a shared service")
@click.option('--etag',
help='The etag of the shared service to update',
required=True)
@click.option('--enable/--disable', is_flag=True, required=True)
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@pass_shared_service_context
def shared_service_set_enabled(shared_service_context: SharedServiceContext, etag, enable, no_wait, output_format, query, suppress_output: bool = False):
log = logging.getLogger(__name__)
shared_service_id = shared_service_context.shared_service_id
if shared_service_id is None:
raise click.UsageError('Missing shared_service ID')
client = ApiClient.get_api_client_from_config()
click.echo(f"Setting isEnabled to {enable}...", err=True)
response = client.call_api(
log,
'PATCH',
f'/api/shared-services/{shared_service_id}',
headers={'etag': etag},
json_data={'isEnabled': enable})
if no_wait:
if not suppress_output or not response.is_success:
output(response, output_format=output_format, query=query, default_table_query=r"sharedService.{id:id,name:templateName, version:templateVersion, is_enabled:isEnabled, status: deploymentStatus}")
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output)
@click.command(name="delete", help="Delete a shared_service")
@click.option('--yes', is_flag=True, default=False)
@click.option('--no-wait',
flag_value=True,
default=False)
@click.option('--ensure-disabled',
help="Ensure disabled before deleting (resources are required to be disabled before deleting)",
flag_value=True,
default=False)
@click.pass_context
@output_option()
@query_option()
@pass_shared_service_context
def shared_service_delete(shared_service_context: SharedServiceContext, ctx: click.Context, yes, no_wait, ensure_disabled, output_format, query):
log = logging.getLogger(__name__)
shared_service_id = shared_service_context.shared_service_id
if shared_service_id is None:
raise click.UsageError('Missing shared_service ID')
if not yes:
click.confirm("Are you sure you want to delete this shared_service?", err=True, abort=True)
client = ApiClient.get_api_client_from_config()
if ensure_disabled:
response = client.call_api(log, 'GET', f'/api/shared-services/{shared_service_id}')
shared_service_json = response.json()
if shared_service_json['sharedService']['isEnabled']:
etag = shared_service_json['sharedService']['_etag']
ctx.invoke(
shared_service_set_enabled,
etag=etag,
enable=False,
no_wait=False,
suppress_output=True
)
click.echo("Deleting shared_service...\n", err=True)
response = client.call_api(log, 'DELETE', f'/api/shared-services/{shared_service_id}')
if no_wait:
output(response, output_format=output_format, query=query)
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query)
shared_service.add_command(shared_service_show)
shared_service.add_command(shared_service_invoke_action)
shared_service.add_command(shared_service_update)
shared_service.add_command(shared_service_set_enabled)
shared_service.add_command(shared_service_delete)
shared_service.add_command(shared_service_operations)
shared_service.add_command(shared_service_operation)
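# Illustrative CLI usage (a sketch, assuming this group is wired into a `tre`
# entry point and that Click exposes hyphenated command names; exact paths
# depend on the parent command group):
#   tre shared-service <SHARED_SERVICE_ID> show
#   tre shared-service <SHARED_SERVICE_ID> set-enabled --etag "<ETAG>" --disable
#   tre shared-service <SHARED_SERVICE_ID> delete --ensure-disabled --yes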
|
AzureTRE/cli/tre/commands/shared_services/shared_service.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/shared_services/shared_service.py",
"repo_id": "AzureTRE",
"token_count": 3171
}
| 99 |
import click
from tre.commands.workspaces.contexts import WorkspaceContext
class WorkspaceAirlockContext(object):
def __init__(self, workspace_id: str, airlock_request_id: str):
self.workspace_id = workspace_id
self.airlock_request_id = airlock_request_id
@staticmethod
def add_airlock_id_to_context_obj(ctx: click.Context, airlock_request_id: str) -> "WorkspaceAirlockContext":
workspace_context = ctx.find_object(WorkspaceContext)
return WorkspaceAirlockContext(workspace_context.workspace_id, airlock_request_id)
@property
def airlock_id(self):
return self.airlock_request_id
pass_workspace_airlock_context = click.make_pass_decorator(WorkspaceAirlockContext)
|
AzureTRE/cli/tre/commands/workspaces/airlock/contexts.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/workspaces/airlock/contexts.py",
"repo_id": "AzureTRE",
"token_count": 267
}
| 100 |
import json
import logging
import click
from tre.api_client import ApiClient
from tre.commands.operation import operation_show
from tre.commands.workspaces.workspace_services.contexts import WorkspaceServiceContext, pass_workspace_service_context
from tre.output import output, output_option, query_option
@click.group(name="user-resources", help="List/add user user resources ")
def user_resources():
pass
@click.command(name="list", help="List user resources")
@output_option()
@query_option()
@pass_workspace_service_context
def user_resources_list(workspace_service_context: WorkspaceServiceContext, output_format, query):
log = logging.getLogger(__name__)
workspace_id = workspace_service_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
workspace_service_id = workspace_service_context.workspace_service_id
if workspace_service_id is None:
raise click.UsageError('Missing workspace service ID')
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
response = client.call_api(
log,
'GET',
f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/user-resources',
scope_id=workspace_scope,
)
output(response, output_format=output_format, query=query, default_table_query=r"userResources[].{id:id, template_name:templateName, template_version:templateVersion, display_name:properties.display_name, owner:user.name}")
@click.command(name="new", help="Create a new user resource")
@click.option('--definition', help='JSON definition for the user resource', required=False)
@click.option('--definition-file', help='File containing JSON definition for the user resource', required=False, type=click.File("r"))
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@pass_workspace_service_context
def user_resource_create(workspace_service_context: WorkspaceServiceContext, definition, definition_file, no_wait, output_format, query):
log = logging.getLogger(__name__)
workspace_id = workspace_service_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
workspace_service_id = workspace_service_context.workspace_service_id
if workspace_service_id is None:
raise click.UsageError('Missing workspace service ID')
if definition is None:
if definition_file is None:
raise click.UsageError('Please specify either a definition or a definition file')
definition = definition_file.read()
definition_dict = json.loads(definition)
client = ApiClient.get_api_client_from_config()
workspace_scope = client.get_workspace_scope(log, workspace_id)
click.echo("Creating user-resource...", err=True)
response = client.call_api(
log,
'POST',
f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/user-resources',
json_data=definition_dict,
scope_id=workspace_scope
)
if no_wait:
output(response, output_format=output_format, query=query)
return response.text
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query, scope_id=workspace_scope)
user_resources.add_command(user_resources_list)
user_resources.add_command(user_resource_create)
|
AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/user_resources.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/user_resources.py",
"repo_id": "AzureTRE",
"token_count": 1202
}
| 101 |
# 'External' storage account - drop location for import
resource "azurerm_storage_account" "sa_import_external" {
name = local.import_external_storage_name
location = var.location
resource_group_name = var.resource_group_name
account_tier = "Standard"
account_replication_type = "LRS"
# Don't allow anonymous access (unrelated to the 'public' networking rules)
allow_nested_items_to_be_public = false
# Important! We rely on the fact that blob created events are only issued once blob creation is complete.
# This is true ONLY when Hierarchical Namespace is DISABLED
is_hns_enabled = false
tags = merge(var.tre_core_tags, {
description = "airlock;import;external"
})
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_endpoint" "stg_import_external_pe" {
name = "pe-stg-import-external-blob-${var.tre_id}"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.airlock_storage_subnet_id
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "pdzg-stg-import-external-blob-${var.tre_id}"
private_dns_zone_ids = [var.blob_core_dns_zone_id]
}
private_service_connection {
name = "psc-stg-import-external-blob-${var.tre_id}"
private_connection_resource_id = azurerm_storage_account.sa_import_external.id
is_manual_connection = false
subresource_names = ["Blob"]
}
}
# 'Approved' export
resource "azurerm_storage_account" "sa_export_approved" {
name = local.export_approved_storage_name
location = var.location
resource_group_name = var.resource_group_name
account_tier = "Standard"
account_replication_type = "LRS"
# Don't allow anonymous access (unrelated to the 'public' networking rules)
allow_nested_items_to_be_public = false
# Important! We rely on the fact that blob created events are only issued once blob creation is complete.
# This is true ONLY when Hierarchical Namespace is DISABLED
is_hns_enabled = false
tags = merge(var.tre_core_tags, {
description = "airlock;export;approved"
})
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_endpoint" "stg_export_approved_pe" {
name = "pe-stg-export-approved-blob-${var.tre_id}"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.airlock_storage_subnet_id
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "pdzg-stg-export-approved-blob-${var.tre_id}"
private_dns_zone_ids = [var.blob_core_dns_zone_id]
}
private_service_connection {
name = "psc-stg-export-approved-blob-${var.tre_id}"
private_connection_resource_id = azurerm_storage_account.sa_export_approved.id
is_manual_connection = false
subresource_names = ["Blob"]
}
}
# 'In-Progress' storage account
resource "azurerm_storage_account" "sa_import_in_progress" {
name = local.import_in_progress_storage_name
location = var.location
resource_group_name = var.resource_group_name
account_tier = "Standard"
account_replication_type = "LRS"
allow_nested_items_to_be_public = false
# Important! We rely on the fact that blob created events are only issued once blob creation is complete.
# This is true ONLY when Hierarchical Namespace is DISABLED
is_hns_enabled = false
tags = merge(var.tre_core_tags, {
description = "airlock;import;in-progress"
})
network_rules {
default_action = var.enable_local_debugging ? "Allow" : "Deny"
bypass = ["AzureServices"]
}
lifecycle { ignore_changes = [tags] }
}
# Enable Airlock Malware Scanning on Core TRE
resource "azapi_resource_action" "enable_defender_for_storage" {
count = var.enable_malware_scanning ? 1 : 0
type = "Microsoft.Security/defenderForStorageSettings@2022-12-01-preview"
resource_id = "${azurerm_storage_account.sa_import_in_progress.id}/providers/Microsoft.Security/defenderForStorageSettings/current"
method = "PUT"
body = jsonencode({
properties = {
isEnabled = true
malwareScanning = {
onUpload = {
isEnabled = true
capGBPerMonth = 5000
},
scanResultsEventGridTopicResourceId = azurerm_eventgrid_topic.scan_result[0].id
}
sensitiveDataDiscovery = {
isEnabled = false
}
overrideSubscriptionLevelSettings = true
}
})
}
resource "azurerm_private_endpoint" "stg_import_inprogress_pe" {
name = "pe-stg-import-inprogress-blob-${var.tre_id}"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.airlock_storage_subnet_id
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "pdzg-stg-import-inprogress-blob-${var.tre_id}"
private_dns_zone_ids = [var.blob_core_dns_zone_id]
}
private_service_connection {
name = "psc-stg-import-inprogress-blob-${var.tre_id}"
private_connection_resource_id = azurerm_storage_account.sa_import_in_progress.id
is_manual_connection = false
subresource_names = ["Blob"]
}
}
# 'Rejected' storage account
resource "azurerm_storage_account" "sa_import_rejected" {
name = local.import_rejected_storage_name
location = var.location
resource_group_name = var.resource_group_name
account_tier = "Standard"
account_replication_type = "LRS"
allow_nested_items_to_be_public = false
# Important! We rely on the fact that blob created events are only issued once blob creation is complete.
# This is true ONLY when Hierarchical Namespace is DISABLED
is_hns_enabled = false
tags = merge(var.tre_core_tags, {
description = "airlock;import;rejected"
})
network_rules {
default_action = var.enable_local_debugging ? "Allow" : "Deny"
bypass = ["AzureServices"]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_endpoint" "stg_import_rejected_pe" {
name = "pe-stg-import-rejected-blob-${var.tre_id}"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.airlock_storage_subnet_id
private_dns_zone_group {
name = "pdzg-stg-import-rejected-blob-${var.tre_id}"
private_dns_zone_ids = [var.blob_core_dns_zone_id]
}
private_service_connection {
name = "psc-stg-import-rejected-blob-${var.tre_id}"
private_connection_resource_id = azurerm_storage_account.sa_import_rejected.id
is_manual_connection = false
subresource_names = ["Blob"]
}
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
# 'Blocked' storage account
resource "azurerm_storage_account" "sa_import_blocked" {
name = local.import_blocked_storage_name
location = var.location
resource_group_name = var.resource_group_name
account_tier = "Standard"
account_replication_type = "LRS"
allow_nested_items_to_be_public = false
# Important! We rely on the fact that blob created events are only issued once blob creation is complete.
# This is true ONLY when Hierarchical Namespace is DISABLED
is_hns_enabled = false
tags = merge(var.tre_core_tags, {
description = "airlock;import;blocked"
})
network_rules {
default_action = var.enable_local_debugging ? "Allow" : "Deny"
bypass = ["AzureServices"]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_endpoint" "stg_import_blocked_pe" {
name = "pe-stg-import-blocked-blob-${var.tre_id}"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.airlock_storage_subnet_id
private_dns_zone_group {
name = "pdzg-stg-import-blocked-blob-${var.tre_id}"
private_dns_zone_ids = [var.blob_core_dns_zone_id]
}
private_service_connection {
name = "psc-stg-import-blocked-blob-${var.tre_id}"
private_connection_resource_id = azurerm_storage_account.sa_import_blocked.id
is_manual_connection = false
subresource_names = ["Blob"]
}
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/core/terraform/airlock/storage_accounts.tf/0
|
{
"file_path": "AzureTRE/core/terraform/airlock/storage_accounts.tf",
"repo_id": "AzureTRE",
"token_count": 3858
}
| 102 |
resource "azurerm_log_analytics_query_pack" "tre" {
name = "querypack-${var.tre_id}"
resource_group_name = var.resource_group_name
location = var.location
tags = var.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_log_analytics_query_pack_query" "rp_logs" {
query_pack_id = azurerm_log_analytics_query_pack.tre.id
display_name = "TRE Resource Processor Logs"
resource_types = ["microsoft.insights/components"] // makes it visible in appinsights
body = <<EOT
traces
| where cloud_RoleName == "resource_processor"
| where message !in ("Looking for new session...", "No sessions for this process. Will look again...")
| project timestamp, message, severityLevel, itemType, operation_Id, operation_ParentId, customDimensions
| union (
exceptions
| where cloud_RoleName == "resource_processor"
| project timestamp, problemId, severityLevel, itemType, type, method, outerType, outerMessage, outerMethod, ['details'], customDimensions, operation_Id, operation_ParentId
)
| order by timestamp desc
EOT
}
resource "azurerm_log_analytics_query_pack_query" "api_logs" {
query_pack_id = azurerm_log_analytics_query_pack.tre.id
display_name = "TRE API Logs"
resource_types = ["microsoft.insights/components"] // makes it visible in appinsights
body = <<EOT
traces
| where cloud_RoleName == "api"
| where message !in ("Looking for new session...")
| where message !startswith ("AMQP error occurred:")
| where customDimensions.fileName !startswith "/usr/local/lib/python3.8/site-packages/azure/servicebus/aio"
| where message !startswith "Unclosed client session"
| project timestamp, message, severityLevel, itemType, operation_Id, operation_ParentId, customDimensions
| union (
exceptions
| where cloud_RoleName == "api"
| project timestamp, problemId, severityLevel, itemType, type, method, outerType, outerMessage, outerMethod, ['details'], customDimensions, operation_Id, operation_ParentId
)
| order by timestamp desc
EOT
}
|
AzureTRE/core/terraform/azure-monitor/query.tf/0
|
{
"file_path": "AzureTRE/core/terraform/azure-monitor/query.tf",
"repo_id": "AzureTRE",
"token_count": 674
}
| 103 |
# For recommended Azure private DNS zone names see https://docs.microsoft.com/azure/private-link/private-endpoint-dns#azure-services-dns-zone-configuration
# Azure Monitor requires 5 DNS zones:
# - privatelink.monitor.azure.com
# - privatelink.oms.opinsights.azure.com
# - privatelink.ods.opinsights.azure.com
# - privatelink.agentsvc.azure-automation.net
# - privatelink.blob.core.windows.net (used also by Storage module)
resource "azurerm_private_dns_zone" "azure_monitor" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.monitor.azure.com"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor" {
name = "azure-monitor-link"
resource_group_name = var.resource_group_name
virtual_network_id = azurerm_virtual_network.core.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor.name
registration_enabled = false
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_oms_opinsights" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.oms.opinsights.azure.com"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_oms_opinsights" {
name = "azure-monitor-link"
resource_group_name = var.resource_group_name
virtual_network_id = azurerm_virtual_network.core.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_oms_opinsights.name
registration_enabled = false
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_ods_opinsights" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.ods.opinsights.azure.com"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_ods_opinsights" {
name = "azure-monitor-link"
resource_group_name = var.resource_group_name
virtual_network_id = azurerm_virtual_network.core.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_ods_opinsights.name
registration_enabled = false
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_agentsvc" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.agentsvc.azure-automation.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_agentsvc" {
name = "azure-monitor-link"
resource_group_name = var.resource_group_name
virtual_network_id = azurerm_virtual_network.core.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_agentsvc.name
registration_enabled = false
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
# Blob DNS zone is used by both Azure Monitor and Storage modules
resource "azurerm_private_dns_zone" "blobcore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.blob.core.windows.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "blobcore" {
name = "blobcorelink"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.blobcore.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azurewebsites" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azurewebsites" {
resource_group_name = var.resource_group_name
virtual_network_id = azurerm_virtual_network.core.id
private_dns_zone_name = azurerm_private_dns_zone.azurewebsites.name
name = "azurewebsites-link"
registration_enabled = false
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "static_web" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.web.core.windows.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "webcorelink" {
name = "staticwebcorelink"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.static_web.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "filecore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.file.core.windows.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "filecorelink" {
name = "filecorelink"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.filecore.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "vaultcore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.vaultcore.azure.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "vaultcore" {
name = "vaultcorelink"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.vaultcore.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azurecr" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurecr.io"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "acrlink" {
name = "acrcorelink"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.azurecr.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "eventgrid" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.eventgrid.azure.net"]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "eventgridlink" {
name = "eventgrid-link"
resource_group_name = var.resource_group_name
private_dns_zone_name = azurerm_private_dns_zone.eventgrid.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "private_dns_zones" {
for_each = local.private_dns_zone_names
name = module.terraform_azurerm_environment_configuration.private_links[each.key]
resource_group_name = var.resource_group_name
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "private_dns_zone_links" {
for_each = azurerm_private_dns_zone.private_dns_zones
name = each.value.name
resource_group_name = var.resource_group_name
private_dns_zone_name = each.value.name
virtual_network_id = azurerm_virtual_network.core.id
tags = local.tre_core_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/core/terraform/network/dns_zones.tf/0
|
{
"file_path": "AzureTRE/core/terraform/network/dns_zones.tf",
"repo_id": "AzureTRE",
"token_count": 3791
}
| 104 |
#!/bin/bash
set -e
script_dir=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
if [[ -z ${STORAGE_ACCOUNT} ]]; then
echo "STORAGE_ACCOUNT not set"
exit 1
fi
# The storage account is protected by network rules
#
# The rules need to be temporarily lifted so that the script can determine if the index.html file
# already exists and, if not, create it. The firewall rules also need lifting so that the
# certificate can be uploaded.
#
# By default, this process adds the IP address of the machine running this script to the allow-list
# of the storage account network rules. In some situations this approach may not work. For example,
# where the machine running this script (an AzDo build agent, for example), and the storage account
# are both on the same private network, and the public IP of the machine running the script is never
# used. In this situation, you may need to drop the default Deny rule.
#
# If the environment variable LETSENCRYPT_DROP_ALL_RULES=1 is set then this script will drop the
# default Deny rule, and then re-enable it once the script is complete, rather than adding the IP address
# to the allow rules.
if [[ "${LETSENCRYPT_DROP_ALL_RULES}" == "1" ]]; then
echo "Removing default DENY rule on storage account ${STORAGE_ACCOUNT}"
az storage account update \
--default-action Allow \
--name "${STORAGE_ACCOUNT}" \
--resource-group "${RESOURCE_GROUP_NAME}"
else
if [[ -z ${PUBLIC_DEPLOYMENT_IP_ADDRESS:-} ]]; then
IPADDR=$(curl ipecho.net/plain; echo)
else
IPADDR=${PUBLIC_DEPLOYMENT_IP_ADDRESS}
fi
echo "Creating network rule on storage account ${STORAGE_ACCOUNT} for $IPADDR"
az storage account network-rule add \
--account-name "${STORAGE_ACCOUNT}" \
--resource-group "${RESOURCE_GROUP_NAME}" \
--ip-address "$IPADDR"
fi
echo "Waiting for network rule to take effect"
sleep 30s
echo "Created network rule on storage account"
echo "Checking for index.html file in storage account"
# Create the default index.html page
cat << EOF > index.html
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml"><head><meta charset="utf-8"/><title></title></head><body></body></html>
EOF
# shellcheck disable=SC2016
indexExists=$(az storage blob list -o json \
--account-name "${STORAGE_ACCOUNT}" \
--auth-mode login \
--container-name '$web' \
--query "[?name=='index.html'].name" \
| jq 'length')
if [[ ${indexExists} -lt 1 ]]; then
echo "Uploading index.html file"
# shellcheck disable=SC2016
az storage blob upload \
--account-name "${STORAGE_ACCOUNT}" \
--auth-mode login \
--container-name '$web' \
--file index.html \
--name index.html \
--no-progress \
--only-show-errors
# Wait a bit for the App Gateway health probe to notice
echo "Waiting 30s for health probe"
sleep 30s
else
echo "index.html already present"
fi
ledir=$(pwd)/letsencrypt
mkdir -p "${ledir}/logs"
# Initiate the ACME challenge
/opt/certbot/bin/certbot certonly \
--config-dir "${ledir}" \
--work-dir "${ledir}" \
--logs-dir "${ledir}"/logs \
--manual \
--preferred-challenges=http \
--manual-auth-hook "${script_dir}"/auth-hook.sh \
--manual-cleanup-hook "${script_dir}"/cleanup-hook.sh \
--domain "$FQDN" \
--non-interactive \
--agree-tos \
--register-unsafely-without-email
# Convert the generated certificate to a .pfx
CERT_DIR="${ledir}/live/$FQDN"
CERT_PASSWORD=$(openssl rand -base64 30)
openssl pkcs12 -export \
-inkey "${CERT_DIR}/privkey.pem" \
-in "${CERT_DIR}/fullchain.pem" \
-out "${CERT_DIR}/aci.pfx" \
-passout "pass:${CERT_PASSWORD}"
if [[ -n ${KEYVAULT} ]]; then
sid=$(az keyvault certificate import \
-o json \
--vault-name "${KEYVAULT}" \
--name 'letsencrypt' \
--file "${CERT_DIR}/aci.pfx" \
--password "${CERT_PASSWORD}" \
| jq -r '.sid')
az network application-gateway ssl-cert update \
--resource-group "${RESOURCE_GROUP_NAME}" \
--gateway-name "${APPLICATION_GATEWAY}" \
--name 'cert-primary' \
--key-vault-secret-id "${sid}"
else
az network application-gateway ssl-cert update \
--resource-group "${RESOURCE_GROUP_NAME}" \
--gateway-name "${APPLICATION_GATEWAY}" \
--name 'letsencrypt' \
--cert-file "${CERT_DIR}/aci.pfx" \
--cert-password "${CERT_PASSWORD}"
fi
if [[ "${LETSENCRYPT_DROP_ALL_RULES}" == "1" ]]; then
echo "Resetting the default DENY rule on storage account ${STORAGE_ACCOUNT}"
az storage account update \
--default-action Deny \
--name "${STORAGE_ACCOUNT}" \
--resource-group "${RESOURCE_GROUP_NAME}"
else
echo "Ressetting network rule on storage account (removing $IPADDR from allow list)"
az storage account network-rule remove \
--account-name "${STORAGE_ACCOUNT}" \
--resource-group "${RESOURCE_GROUP_NAME}" \
--ip-address "${IPADDR}"
fi
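# Illustrative invocation (a sketch; all values are placeholders, and every
# variable shown is one the script above reads):
#   export RESOURCE_GROUP_NAME="rg-mytre"
#   export STORAGE_ACCOUNT="stwebmytre"
#   export APPLICATION_GATEWAY="agw-mytre"
#   export KEYVAULT="kv-mytre"                    # optional; omit to upload the .pfx directly
#   export FQDN="mytre.westeurope.cloudapp.azure.com"
#   # export LETSENCRYPT_DROP_ALL_RULES=1         # optional; toggles the default Deny rule instead of IP allow-listing
#   ./letsencrypt.sh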
|
AzureTRE/core/terraform/scripts/letsencrypt.sh/0
|
{
"file_path": "AzureTRE/core/terraform/scripts/letsencrypt.sh",
"repo_id": "AzureTRE",
"token_count": 1899
}
| 105 |
#!/bin/bash
# This script is designed to be `source`d to create reusable helper functions
# Notes: Before Az CLI 2.37 this would return a json document with .objectId; that is now .id
# This script polls looking for an app registration with the given ID.
# If no app registration is found after all retries, the function exits with an error.
function wait_for_new_app_registration()
{
local clientId=$1
local retries=10
local counter=0
objectId=$(az ad app list --filter "appId eq '${clientId}'" --query '[0].id' --output tsv --only-show-errors)
while [[ -z $objectId && $counter -lt $retries ]]; do
counter=$((counter+1))
echo "Waiting for app registration with ID ${clientId} to show up (${counter}/${retries})..."
sleep 5
objectId=$(az ad app list --filter "appId eq '${clientId}'" --query '[0].id' --output tsv --only-show-errors)
done
if [[ -z $objectId ]]; then
echo "Failed"
exit 1
fi
echo "App registration \"${clientId}\" found."
}
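# Illustrative usage (a sketch): source this file, then poll for a newly created
# app registration by its client (application) ID:
#   source ./devops/scripts/aad/wait_for_new_app_registration.sh
#   wait_for_new_app_registration "00000000-0000-0000-0000-000000000000"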
|
AzureTRE/devops/scripts/aad/wait_for_new_app_registration.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/aad/wait_for_new_app_registration.sh",
"repo_id": "AzureTRE",
"token_count": 331
}
| 106 |
#!/bin/bash
set -o errexit
set -o pipefail
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
activeDirectoryUri="$(az cloud show --query endpoints.activeDirectory --output tsv)"
if [ -n "${TEST_ACCOUNT_CLIENT_ID:-}" ] && [ -n "${TEST_ACCOUNT_CLIENT_SECRET:-}" ] && [ -n "${AAD_TENANT_ID:-}" ] && [ -n "${API_CLIENT_ID:-}" ]
then
# Use client credentials flow with TEST_ACCOUNT_CLIENT_ID/SECRET
echo "Using TEST_ACCOUNT_CLIENT_ID to get token via client credential flow"
token_response=$(curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \
"${activeDirectoryUri}/${AAD_TENANT_ID}"/oauth2/v2.0/token \
-d "client_id=${TEST_ACCOUNT_CLIENT_ID}" \
-d 'grant_type=client_credentials' \
-d "scope=api://${API_CLIENT_ID}/.default" \
-d "client_secret=${TEST_ACCOUNT_CLIENT_SECRET}")
elif [ -n "${API_CLIENT_ID:-}" ] && [ -n "${TEST_APP_ID:-}" ] && [ -n "${TEST_USER_NAME:-}" ] && [ -n "${TEST_USER_PASSWORD:-}" ] && [ -n "${AAD_TENANT_ID:-}" ]
then
# Use resource owner password credentials flow with USERNAME/PASSWORD
echo "Using TEST_USER_NAME to get token via resource owner password credential flow"
token_response=$(curl -X POST -H "Content-Type: application/x-www-form-urlencoded" -d \
"grant_type=password&resource=""${API_CLIENT_ID}""&client_id=""${TEST_APP_ID}""&username=""${TEST_USER_NAME}""&password=""${TEST_USER_PASSWORD}""&scope=default)" \
"${activeDirectoryUri}/${AAD_TENANT_ID}"/oauth2/token)
fi
if [ -n "${token_response:-}" ]
then
ACCESS_TOKEN=$(echo "${token_response}" | jq -r .access_token)
if [[ "${ACCESS_TOKEN}" == "null" ]]; then
echo "Failed to obtain auth token for API:"
echo "${token_response}"
exit 2
fi
export ACCESS_TOKEN
fi
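# Illustrative usage (a sketch): source the script so ACCESS_TOKEN is exported into
# the current shell, then call the TRE API with it. TRE_URL is a placeholder for
# your API endpoint; the client-credential variables are the ones checked above.
#   export AAD_TENANT_ID="..." API_CLIENT_ID="..." TEST_ACCOUNT_CLIENT_ID="..." TEST_ACCOUNT_CLIENT_SECRET="..."
#   source ./devops/scripts/get_access_token.sh
#   curl -H "Authorization: Bearer ${ACCESS_TOKEN}" "${TRE_URL}/api/workspaces"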
|
AzureTRE/devops/scripts/get_access_token.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/get_access_token.sh",
"repo_id": "AzureTRE",
"token_count": 719
}
| 107 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.51.0"
constraints = "3.51.0"
hashes = [
"h1:X13zyweEi+honSpCdijEJBawbG6m1NmGyWDccICIKXs=",
"zh:045a56f984882b8cf111301550e14a51346c817ec0b3d6dc752f7533585ed99b",
"zh:102fa2fb9213f20c2834b7abb51d44f1c766bb28ad4f14c98d2c149faba0a911",
"zh:173e38d128bf559b4a3e4bf5511974ab87951ffad9460f769067f62edc66acc7",
"zh:22b7f74a6bf86fa4f735783331335b9c4783d5437c672a7d2579cedea8463e3b",
"zh:3e37e83a5f39f73fa3b310162ef9fc58449445aaeb2ddad66404251ceb6908a5",
"zh:6609b1c63ea9466bea599f6e1e32573a13889db6be89dd068c0eb114f7de50d5",
"zh:6a2cc4ab06a467369f03c4e1fb1eeb23f0ea8a98fb1c0651284fe45ca3d5b02f",
"zh:91885a417d4fd1cdc81b64d26330dacf608ef2f9d272b8f7073d93e71d6bccef",
"zh:96d1879e52f399f3c813bcf36c7ceee72273a3e020077d09b03f6b7fdef4430c",
"zh:dcdae19688d83dbf5dbc4c75d4c5f2d6d8abeb4fddc404c20880f9f3fa22c3ed",
"zh:e2e11ffae353f43f4bf4bb88fa386fb214b8dd4aebfe7665b3142484652f9651",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
|
AzureTRE/devops/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/devops/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 693
}
| 108 |
# User roles
The Azure TRE solution defines nine user roles, each modeled around a set of tasks. The roles are not mutually exclusive, and one person can be assigned multiple roles to carry out a broader set of tasks.
Before you deploy a Trusted Research Environment based on the Azure TRE solution, consider your scenario and understand which of these roles need to be staffed.
## Role overview
While nine user roles are defined for the Azure TRE solution, not all of them are required in every scenario. Four of the roles are enforced through role-based access control (RBAC) within the TRE.
| Role | Key task | TRE RBAC |
|------|----------|----------|
| Azure administrator | Deploy the TRE | |
| TRE administrator | Administer the TRE | ✔ |
| TRE workspace owner | Own a workspace | ✔ |
| Researcher | Perform research on the data | ✔ |
| Airlock Manager | Approves data import & export | ✔ |
| TRE service integrator | Integrate additional workspace services | |
| Azure TRE developer | Extend the TRE OSS solution | |
| Data engineer | Move data to and potentially from the TRE | |
| Information security officer | Validate and sign-off TRE deployment | |
!!! info
More granular RBAC information is available [here](../tre-developers/api-permissions-map.md).
## Azure administrator
Provisions the Azure TRE solution in an Azure subscription and performs tasks that require knowledge of Azure operations and has access to the Azure subscription.
Example tasks:
- Provision Azure TRE solution instances.
- Second line support for TRE administrators, TRE workspace owners and Researchers when Azure TRE troubleshooting is required.
- Work with the data engineer to connect the Azure TRE with the data platform.
- Troubleshoot provisioning issues and failed deployments.
- Manage TRE administrator users.
- Manage data backups and restores.
- Update the Azure TRE instances.
- Configure log and metrics alerts.
Expected skills:
- Azure administration and operations.
- Infrastructure as Code (Terraform, ARM, Git)
- PowerShell, Bash
## TRE administrator
Day-to-day running and operations of the Azure TRE instance without touching Azure resources.
Example tasks:
- Manage workspace owner users.
- Provision workspaces.
- Manage shared services e.g., available packages in package mirror shared service.
- Monitor workspace usage and billing.
- Set and manage quotas.
- Create and manage workspaces
Expected skills:
- Limited or no Azure knowledge expected.
## TRE workspace owner
Owns a specific workspace and has additional privileges beyond those of a Researcher within the workspace. Is most likely also a *Researcher*.
Example tasks:
- Manage Researcher users.
- Export data from workspace.
- Import data and make it available within the workspace.
- Enable services within the workspace.
- Monitor billing and usage of the workspace.
- Create and manage workspace services
Expected skills:
- Limited or no Azure knowledge expected.
## Researcher
Has access to one specific workspace and can use all the services provisioned within that workspace.
Example tasks:
- Import software packages needed to conduct research (PyPi, Conda, Apt).
- Perform research using the services in the workspace.
- Create and manage user resources
Expected skills:
- Python, R
- Git
- Linux
## Airlock Manager
Approves (and in some instances reviews) the data being imported to and exported from a TRE Workspace.
Example tasks:
- Approve Airlock import requests
- Approve Airlock export requests
- Review the data being imported to and exported from a TRE Workspace
## TRE service integrator
Integrates workspace service types with an Azure TRE instance. This involves extending the Azure Infrastructure as Code templates to make a workspace service available within an Azure TRE instance.
Example tasks:
- Integrate a workspace service type with your Azure TRE instance.
- Implement Infrastructure as Code templates for new workspace service types.
Expected skills:
- Infrastructure as Code (Terraform, ARM, Git)
- Python, Bash
- Azure administration
## Azure TRE developer
Software developer who contributes to the development of the Azure TRE solution.
Example tasks:
- Modify the deployment service, API and other components of the Azure TRE solution.
- Contribute to the Azure TRE OSS solution.
Expected skills:
- Python, Bash
- Infrastructure as Code (Terraform, ARM, Git)
- Azure administration
## Data engineer
Supporting role that is expected to build data movement pipelines between the data platform (not part of the TRE), and the TRE instance.
Example tasks:
- Transfer data from the data platform to the TRE and potentially back.
- Create data movement and transformation pipelines.
Expected skills:
- Python, Bash, Linux
- Azure Data Factory and other ETL tools.
## Information Security Officer
Needs to understand the security posture of the TRE to ensure that the organization is compliant with the information governance framework and additional relevant regulations.
Example tasks:
- Use the Azure TRE documentation to understand the security posture of the TRE.
- Work with Azure administrator and TRE administrator to enforce the required security and privacy controls on the TRE.
- Commission penetration testing.
- Work with the organization's Information Governance committee to validate and sign off the Azure TRE deployment.
|
AzureTRE/docs/azure-tre-overview/user-roles.md/0
|
{
"file_path": "AzureTRE/docs/azure-tre-overview/user-roles.md",
"repo_id": "AzureTRE",
"token_count": 1231
}
| 109 |
# Workspace Applications
## Purpose
Access to workspaces is also controlled using app registrations - one per workspace. The configuration of the app registration depends on the nature of the workspace, but this section covers the typical minimum settings.
## Application Roles
| Display name | Description | Allowed member types | Value |
| ------------ | ----------- | -------------------- | ----- |
| Workspace Owner | Provides workspace owners access to the Workspace. | Users/Groups,Applications | `WorkspaceOwner` |
| Workspace Researcher | Provides researchers access to the Workspace. | Users/Groups,Applications | `WorkspaceResearcher` |
| Airlock Manager | Provides airlock managers access to the Workspace and ability to review airlock requests. | Users/Groups,Applications | `AirlockManager` |
## Microsoft Graph Permissions
| Name | Type* | Admin consent required | TRE usage |
| --- | -- | -----| --------- |
|email|Delegated|No|Used to read the user's email address when creating TRE resources|
|openid|Delegated|No|Allows users to sign in to the app with their work or school accounts and allows the app to see basic user profile information.|
|profile|Delegated|No|Used to read the user's profile when creating TRE resources|
'*' See the difference between [delegated and application permission](https://docs.microsoft.com/graph/auth/auth-concepts#delegated-and-application-permissions) types. See [Microsoft Graph permissions reference](https://docs.microsoft.com/graph/permissions-reference) for more details.
## Clients
This identity should only be used by the API Application.
## How to create
There are two mechanisms for creating Workspace Applications
- Manually by your Microsoft Entra ID Tenant Admin (default)
- Automatically by TRE. Please see this [guide](./application_admin.md) if you wish this to be automatic.
!!! caution
By default, the app registration for a workspace is not created by the [API](../../tre-developers/api.md). One needs to be present before using the API to provision a new workspace. If you ran `make auth`, a workspace AD application was created for you. If you wish to create another, the same script can be used to create the **Workspace Application**.
Example on how to run the script:
```bash
./devops/scripts/aad/create_workspace_application.sh \
--name "${TRE_ID} - workspace 11" \
--admin-consent \
--ux-clientid "${SWAGGER_UI_CLIENT_ID}" \
--automation-clientid "${TEST_ACCOUNT_CLIENT_ID}" \
--application-admin-clientid "${APPLICATION_ADMIN_CLIENT_ID}"
```
| Argument | Description |
| -------- | ----------- |
| `--name` | The name of the application. This will be suffixed with 'API' by the script. |
| `--ux-clientid` | This value is one of the outputs when you first ran the script. It is mandatory if you use admin-consent. |
| `--admin-consent` | Grants admin consent for the app registrations. This is required for them to function properly, but requires Microsoft Entra ID admin privileges. |
| `--automation-clientid` | This is an optional parameter but will grant the Automation App (created in step 1) permission to the new workspace app. |
| `--application-admin-clientid` | This is a required parameter, and should be a client ID that will be added to the Owners of the Microsoft Entra ID Application so that it can be administered within TRE. |
| `--reset-password` | Optional, default is 0. When run in a headless fashion, 1 is passed in to always reset the password. |
!!! caution
    The script will create an app password (client secret) for the workspace and write it to `./config.yaml` under the authentication section. These values are only shown once; if you lose them, running the script again will create new secrets.
If you do not wish to grant the Automation App permission to your workspace, just remove the `--automation-clientid` from the command.
## Environment Variables
| Variable | Description | Location |
| -------- | ----------- | -------- |
|WORKSPACE_API_CLIENT_ID|The Client Id|`./config.yaml`|
|WORKSPACE_API_CLIENT_SECRET|The client secret|`./config.yaml`|
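For illustration, a minimal sketch of how these values might appear under the `authentication` section of `./config.yaml` after running the script (the exact keys and surrounding settings are written by the auth scripts; the placeholder values here are not real):
```yaml
authentication:
  # Written for the workspace app registration created by create_workspace_application.sh
  workspace_api_client_id: "00000000-0000-0000-0000-000000000000"
  workspace_api_client_secret: "<secret value shown once at creation time>"
```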
## Comments
When the Workspace Microsoft Entra ID app is registered by running `make auth`, the `Workspace Scope Id` is the same as the Client Id. When the Workspace Microsoft Entra ID app is created by the base workspace, the `Workspace Scope Id` will be in this format `api://<TRE_ID>_ws_<WORKSPACE_SHORT_IDENTIFIER>`
|
AzureTRE/docs/tre-admins/identities/workspace.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/identities/workspace.md",
"repo_id": "AzureTRE",
"token_count": 1161
}
| 110 |
# GitHub Actions workflows (CI/CD)
To deploy the Azure TRE using GitHub workflows, create a fork of the repository.
Deployment is done using the `/.github/workflows/deploy_tre.yml` workflow. This method is also used to deploy the dev/test environment for the original Azure TRE repository.
## Setup instructions
Before you can run the `deploy_tre.yml` workflow there are some one-time configuration steps that we need to do, similar to the Pre-deployment steps for manual deployment.
!!! tip
In some of the steps below, you are asked to configure repository secrets. Follow the [GitHub guide](https://docs.github.com/en/actions/security-guides/encrypted-secrets) on creating repository secrets if you are unfamiliar with this step.
1. Create a service principal for the subscription so that the workflow can provision Azure resources.
1. Decide on a TRE ID and the location for the Azure resources
1. Create app registrations for API authentication
1. Create app registrations and a user for the E2E tests
1. Create a workspace app registration for setting up workspaces (for the E2E tests)
1. Create a Teams WebHook for deployment notifications
1. Configure repository secrets
1. Deploy the TRE using the workflow
### Create a service principal for provisioning resources
1. Login to Azure
Log in to Azure using `az login` and select the Azure subscription you wish to deploy Azure TRE to:
```cmd
az login
az account list
az account set --subscription <subscription ID>
```
See [Sign in with Azure CLI](https://docs.microsoft.com/cli/azure/authenticate-azure-cli) for more details.
1. Create a service principal
A service principal needs to be created to authorize CI/CD workflows to provision resources for the TRE workspaces and workspace services.
Create a main service principal with "**Owner**" role:
```cmd
az ad sp create-for-rbac --name "sp-aztre-cicd" --role Owner --scopes /subscriptions/<subscription_id> --sdk-auth
```
!!! caution
Save the JSON output locally - as you will need it later for setting secrets in the build
1. Create a repository secret named `AZURE_CREDENTIALS` and use the JSON output from the previous step as its value. Note it should look similar to this:
```json
{
"clientId": "",
"clientSecret": "",
"subscriptionId": "",
"tenantId": ""
}
```
### Decide on a TRE ID and Azure resources location
Configure the TRE ID and LOCATION repository secrets
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `TRE_ID` | A globally unique identifier. `TRE_ID` can be found in the resource names of the Azure TRE instance; for example, a `TRE_ID` of `tre-dev-42` will result in a resource group name for Azure TRE instance of `rg-tre-dev-42`. This must be less than 12 characters. Allowed characters: lowercase alphanumerics. |
| `LOCATION` | The Azure location (region) for all resources. E.g. `westeurope` |
### Create app registrations for API authentication
Follow the instructions to run the **app registration script** in the [Authentication and Authorization document](../auth.md#app-registrations). Use the values for TRE ID and LOCATION from above.
Configure the TRE API and Swagger UI repository secrets
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `AAD_TENANT_ID` | The tenant ID of the Microsoft Entra ID. |
| `SWAGGER_UI_CLIENT_ID` | The application (client) ID of the TRE Swagger UI app. |
| `API_CLIENT_ID` | The application (client) ID of the TRE API app. |
| `API_CLIENT_SECRET` | The application password (client secret) of the TRE API app. |
### Create an app registration and a user for the E2E tests
Follow the instructions to [create an app registration and a test user for the E2E tests in the Authentication and Authorization](../auth.md#tre-e2e-test) document.
Configure the E2E Test repository secrets
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `TEST_APP_ID` | The application (client) ID of the E2E Test app |
| `TEST_USER_NAME` | The username of the E2E Test User |
| `TEST_USER_PASSWORD` | The password of the E2E Test User |
### Create a workspace app registration for setting up workspaces (for the E2E tests)
Follow the [instructions to create a workspace app registration](../auth.md#workspaces) (used for the E2E tests) - and make the E2E test user a **WorkspaceOwner** for the app registration.
Configure the TEST_WORKSPACE_APP_ID repository secret
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `TEST_WORKSPACE_APP_ID` | The application (client) ID of the Workspaces app. |
| `TEST_WORKSPACE_APP_SECRET` | The application (client) secret of the Workspaces app. |
### Create a Teams Webhook for deployment notifications
The `deploy_tre.yml` workflow sends a notification to a Microsoft Teams channel when it finishes running.
!!! note
If you don't want to notify a channel, you can also remove the **Notify dedicated teams channel** steps in the workflow
1. Follow the [Microsoft Docs](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook) to create a webhook for your channel
1. Configure the MS_TEAMS_WEBHOOK_URI repository secret
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `MS_TEAMS_WEBHOOK_URI` | URI for the Teams channel webhook |
### Configure repository/environment secrets
Configure additional secrets used in the deployment workflow:
| <div style="width: 230px">Secret name</div> | Description |
| ----------- | ----------- |
| `TRE_ID` | A globally unique identifier. `TRE_ID` can be found in the resource names of the Azure TRE instance; for example, a `TRE_ID` of `tre-dev-42` will result in a resource group name for Azure TRE instance of `rg-tre-dev-42`. This must be less than 12 characters. Allowed characters: lowercase alphanumerics. |
| `MGMT_RESOURCE_GROUP_NAME` | The name of the shared resource group for all Azure TRE core resources. |
| `MGMT_STORAGE_ACCOUNT_NAME` | The name of the storage account to hold the Terraform state and other deployment artifacts. E.g. `mystorageaccount`. |
| `ACR_NAME` | A globally unique name for the Azure Container Registry (ACR) that will be created to store deployment images. |
### Configure repository/environment variables
Configure variables used in the deployment workflow:
| <div style="width: 230px">Variable name</div> | Description |
| ----------- | ----------- |
| `LOCATION` | The Azure location (region) for all resources. E.g. `westeurope` |
| `TERRAFORM_STATE_CONTAINER_NAME` | Optional. The name of the blob container to hold the Terraform state. Default value is `tfstate`. |
| `CORE_ADDRESS_SPACE` | Optional. The address space for the Azure TRE core virtual network. Default value is `10.0.0.0/22`. |
| `TRE_ADDRESS_SPACE` | Optional. The address space for the whole TRE environment virtual network where workspaces networks will be created (can include the core network as well). Default value is `10.0.0.0/16`|
| `AZURE_ENVIRONMENT` | Optional. The name of the Azure environment. Supported values are `AzureCloud` and `AzureUSGovernment`. Default value is `AzureCloud`. |
| `CORE_APP_SERVICE_PLAN_SKU` | Optional. The SKU used for AppService plan for core infrastructure. Default value is `P1v2`. |
| `WORKSPACE_APP_SERVICE_PLAN_SKU` | Optional. The SKU used for AppService plan used in E2E tests. Default value is `P1v2`. |
| `RESOURCE_PROCESSOR_NUMBER_PROCESSES_PER_INSTANCE` | Optional. The number of processes to instantiate when the Resource Processor starts. Equates to the number of parallel deployment operations possible in your TRE. Defaults to `5`. |
| `ENABLE_SWAGGER` | Optional. Determines whether the Swagger interface for the API will be available. Default value is `false`. |
### Deploy the TRE using the workflow
With all the repository secrets set, you can trigger a workflow run by pushing to develop/main of your fork, or by dispatching the workflow manually.
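For example, a manual dispatch can be started from the command line with the GitHub CLI (assuming `gh` is installed and authenticated against your fork; the branch name is just an example):
```bash
gh workflow run deploy_tre.yml --ref main
```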
|
AzureTRE/docs/tre-admins/setup-instructions/workflows.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/setup-instructions/workflows.md",
"repo_id": "AzureTRE",
"token_count": 2267
}
| 111 |
# Pipeline Templates
Occasionally there will be a need for the deployment / update of one resource to affect a change in another. This section outlines how that can be achieved with Pipeline Templates.
## Overview
A pipeline template is an optional `pipeline: {}` block that can be added to the top level of a resource schema document. It allows a template developer to define actions to run against other resources before and after the primary resource is deployed.
### Example
Consider the following `template_schema.json`:
```json
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-dev-vm/template_schema.json",
...
"properties": {...},
"pipeline": {
"install": [
{
"stepId": "6d2d7eb7-984e-4330-bd3c-c7ec98658402",
"stepTitle": "Update the firewall name",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared_service",
"resourceAction": "upgrade",
"properties": [
{
"name": "display_name",
"type": "string",
"value": "A new name here!"
}]
},
{
"stepId": "main"
},
{
"stepId": "2fe8a6a7-2c27-4c49-8773-127df8a48b4e",
...
}
]
}
}
```
When a user deploys this resource, the API will read the `install: []` array within the `pipeline: {}` block, and will:
- Orchestrate the `upgrade` of the `tre-shared-service-firewall`, changing the `display_name` property to `A new name here!`.
- Run the `main` (primary resource) install
- Run the next step in the `install` array (the step that follows `main` in the example above)
A single `Operation` document will be used to keep track of which steps in the deployment chain have completed.
## Current Limitations
This feature is undergoing active development, and is currently limited in the following ways:
- Only statically addressable resources can be referred to - `shared_services`, as these are singletons and can be referenced by a template name.
- Only the `upgrade` action for each secondary resource is supported. Support for `install` / `uninstall` of secondary resources is planned.
- No current planned support for `customActions`.
|
AzureTRE/docs/tre-templates/pipeline-templates/overview.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/pipeline-templates/overview.md",
"repo_id": "AzureTRE",
"token_count": 753
}
| 112 |
# Setting the logging level to DEBUG on the Resource Processor and API
For security, the API and Resource Processor are configured not to show detailed error messages and stack traces when an error occurs.
You can enable debugging on the API and Resource Processor by setting `logging_level=debug` under the `developer_settings` section in your `config.yaml` file.
Once set, you need to run `make deploy-core` to update the settings on the API and Resource Processor. You should start to see logs with severity level `0` appear in the Application Insights logs.
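For reference, a minimal sketch of the relevant part of `config.yaml` (only `logging_level` is significant here; any other developer settings you already have remain unchanged):
```yaml
developer_settings:
  # Enables verbose (DEBUG) logging on the API and Resource Processor
  logging_level: debug
```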
|
AzureTRE/docs/troubleshooting-faq/debug-logs.md/0
|
{
"file_path": "AzureTRE/docs/troubleshooting-faq/debug-logs.md",
"repo_id": "AzureTRE",
"token_count": 125
}
| 113 |
import asyncio
import logging
from httpx import AsyncClient, Timeout
import os
from urllib.parse import urlparse
from azure.storage.blob import BlobClient
from airlock import strings
from e2e_tests.helpers import get_auth_header, get_full_endpoint
LOGGER = logging.getLogger(__name__)
TIMEOUT = Timeout(10, read=30)
async def post_request(payload, endpoint, access_token, verify, assert_status):
async with AsyncClient(verify=verify, timeout=TIMEOUT) as client:
full_endpoint = get_full_endpoint(endpoint)
auth_headers = get_auth_header(access_token)
LOGGER.info(f"posting to {endpoint} with payload:\n{payload}")
response = await client.post(
full_endpoint, headers=auth_headers, json=payload, timeout=TIMEOUT
)
LOGGER.info(
f"Response Status code: {response.status_code} Content: {response.content}"
)
assert response.status_code == assert_status
return response.json()
async def get_request(endpoint, access_token, verify, assert_status):
async with AsyncClient(verify=verify, timeout=TIMEOUT) as client:
full_endpoint = get_full_endpoint(endpoint)
auth_headers = get_auth_header(access_token)
auth_headers["accept"] = "application/json"
response = await client.get(
full_endpoint, headers=auth_headers, timeout=TIMEOUT
)
LOGGER.info(
f"Response Status code: {response.status_code} Content: {response.content}"
)
assert response.status_code == assert_status
return response.json()
async def upload_blob_using_sas(file_path: str, sas_url: str):
async with AsyncClient(timeout=30.0) as client:
parsed_sas_url = urlparse(sas_url)
# Remove first / from path
if parsed_sas_url.path[0] == "/":
container_name = parsed_sas_url.path[1:]
else:
container_name = parsed_sas_url.path
storage_account_url = f"{parsed_sas_url.scheme}://{parsed_sas_url.netloc}/"
file_name = os.path.basename(file_path)
_, file_ext = os.path.splitext(file_name)
blob_url = f"{storage_account_url}{container_name}/{file_name}?{parsed_sas_url.query}"
LOGGER.info(f"uploading [{file_name}] to container [{blob_url}]")
client = BlobClient.from_blob_url(blob_url)
        # open the file using its full path, not just the basename, so files outside the CWD work
        with open(file_path, 'rb') as data:
response = client.upload_blob(data)
return response
async def wait_for_status(
request_status: str, workspace_owner_token, workspace_path, request_id, verify
):
while True:
request_result = await get_request(
f"/api{workspace_path}/requests/{request_id}",
workspace_owner_token,
verify,
200,
)
current_status = request_result[strings.AIRLOCK_REQUEST][strings.AIRLOCK_REQUEST_STATUS]
if (current_status == request_status):
break
if (is_final_status(current_status)):
status = request_result[strings.AIRLOCK_REQUEST].get(strings.AIRLOCK_REQUEST_STATUS_MESSAGE)
LOGGER.error(f"Airlock request ended with unexpected status: {current_status}. reason: {status}")
raise Exception("Airlock request unexpected status.")
LOGGER.info(f"Waiting for request status: {request_status}, current status is {current_status}")
await asyncio.sleep(5)
def is_final_status(status):
return status in [strings.APPROVED_STATUS, strings.REJECTED_STATUS, strings.CANCELLED_STATUS, strings.BLOCKED_STATUS, strings.FAILED_STATUS]
|
AzureTRE/e2e_tests/airlock/request.py/0
|
{
"file_path": "AzureTRE/e2e_tests/airlock/request.py",
"repo_id": "AzureTRE",
"token_count": 1490
}
| 114 |
import asyncio
import pytest
import config
from e2e_tests.conftest import disable_and_delete_tre_resource, disable_and_delete_ws_resource
from resources.workspace import get_workspace_auth_details
from resources.resource import post_resource
from resources import strings
from helpers import get_admin_token
pytestmark = pytest.mark.asyncio
@pytest.mark.performance
@pytest.mark.timeout(3000)
async def test_parallel_resource_creations(verify) -> None:
"""Creates N workspaces in parallel, and creates a workspace service in each, in parallel"""
number_workspaces = 2
tasks = []
for i in range(number_workspaces):
payload = {
"templateName": strings.BASE_WORKSPACE,
"properties": {
"display_name": f'Perf Test Workspace {i}',
"description": "workspace for perf test",
"address_space_size": "small",
"auth_type": "Manual",
"client_id": f"{config.TEST_WORKSPACE_APP_ID}"
}
}
admin_token = await get_admin_token(verify)
task = asyncio.create_task(post_resource(payload=payload, endpoint=strings.API_WORKSPACES, access_token=admin_token, verify=verify))
tasks.append(task)
resource_paths = await asyncio.gather(*tasks)
# Now disable + delete them all in parallel
tasks = []
for workspace_path, _ in resource_paths:
task = asyncio.create_task(disable_and_delete_tre_resource(workspace_path, verify))
tasks.append(task)
await asyncio.gather(*tasks)
@pytest.mark.skip
@pytest.mark.performance
@pytest.mark.timeout(3000)
async def test_bulk_updates_to_ensure_each_resource_updated_in_series(verify) -> None:
"""Optionally creates a workspace and workspace service,
then creates N number of VMs in parallel, patches each, and deletes them"""
number_vms = 5
number_updates = 5
    workspace_id = config.TEST_WORKSPACE_ID
    # fetch an admin token up front so it is defined whether or not we create the workspace below
    admin_token = await get_admin_token(verify)
    if workspace_id == "":
# create the workspace to use
payload = {
"templateName": strings.BASE_WORKSPACE,
"properties": {
"display_name": "E2E test guacamole service",
"description": "",
"address_space_size": "small",
"auth_type": "Manual",
"client_id": f"{config.TEST_WORKSPACE_APP_ID}",
"client_secret": f"{config.TEST_WORKSPACE_APP_SECRET}"
}
}
        workspace_path, workspace_id = await post_resource(payload, strings.API_WORKSPACES, admin_token, verify)
else:
workspace_path = f"/workspaces/{workspace_id}"
workspace_owner_token, scope_uri = await get_workspace_auth_details(admin_token=admin_token, workspace_id=workspace_id, verify=verify)
workspace_service_id = config.TEST_WORKSPACE_SERVICE_ID
if workspace_service_id == "":
# create a guac service
service_payload = {
"templateName": strings.GUACAMOLE_SERVICE,
"properties": {
"display_name": "Workspace service test",
"description": ""
}
}
workspace_service_path, _ = await post_resource(
payload=service_payload,
endpoint=f'/api{workspace_path}/{strings.API_WORKSPACE_SERVICES}',
access_token=workspace_owner_token,
verify=verify)
else:
workspace_service_path = f"{workspace_path}/{strings.API_WORKSPACE_SERVICES}/{workspace_service_id}"
# Create the VMs in parallel, and wait for them to be created
user_resource_payload = {
"templateName": "tre-service-dev-vm",
"properties": {
"display_name": "Perf test VM",
"description": "",
"os_image": "Ubuntu 18.04"
}
}
tasks = []
for i in range(number_vms):
task = asyncio.create_task(post_resource(
payload=user_resource_payload,
endpoint=f'/api{workspace_service_path}/{strings.API_USER_RESOURCES}',
access_token=workspace_owner_token,
verify=verify))
tasks.append(task)
resource_paths = await asyncio.gather(*tasks)
# Now patch each VM multiple times each, without waiting for the patch to complete, so the messages stack up - even if the RP has spare processors.
# Then disable / delete each one, with the wait. This performs a PATCH then DELETE. If these execute successfully we'll have a high level of confidence
# that other operations were not in progress for that VM at that point (ie. the messages were processed in serial).
tasks = []
for resource_path, _ in resource_paths:
for i in range(number_updates):
patch_payload = {
"properties": {
"display_name": f'Perf test VM update {i}',
}
}
await post_resource(
payload=patch_payload,
endpoint=f'/api{resource_path}',
access_token=workspace_owner_token,
verify=verify,
method="PATCH",
wait=False)
# clear up all the VMs in parallel
# NOTE: Due to bug https://github.com/microsoft/AzureTRE/issues/1163 - this VM delete step currently fails
task = asyncio.create_task(disable_and_delete_ws_resource(workspace_id, resource_path, verify))
tasks.append(task)
await asyncio.gather(*tasks)
admin_token = await get_admin_token(verify)
# clear up workspace + service (if we created them)
if config.TEST_WORKSPACE_ID == "":
await disable_and_delete_tre_resource(workspace_path, verify)
|
AzureTRE/e2e_tests/test_performance.py/0
|
{
"file_path": "AzureTRE/e2e_tests/test_performance.py",
"repo_id": "AzureTRE",
"token_count": 2428
}
| 115 |
def get_installation_id(msg_body):
"""
This is used to identify each bundle install within the porter state store.
"""
return msg_body['id']
|
AzureTRE/resource_processor/resources/helpers.py/0
|
{
"file_path": "AzureTRE/resource_processor/resources/helpers.py",
"repo_id": "AzureTRE",
"token_count": 53
}
| 116 |
from mock import patch
import logging
from shared.logging import shell_output_logger
@patch("shared.logging.logger")
def test_shell_output_logger_empty_console_output(mock_logger):
shell_output_logger("", "prefix", logging.DEBUG)
mock_logger.debug.assert_called_once_with("shell console output is empty.")
@patch("shared.logging.logger")
def test_shell_output_logger_image_not_present_locally(mock_logger):
console_output = "Unable to find image 'test_image' locally\nexecution completed successfully!"
shell_output_logger(console_output, "prefix", logging.DEBUG)
mock_logger.debug.assert_called_with("Image not present locally, removing text from console output.")
mock_logger.log.assert_called_with(logging.INFO, "prefix execution completed successfully!")
@patch("shared.logging.logger")
def test_shell_output_logger_execution_completed_successfully(mock_logger):
console_output = "execution completed successfully!"
shell_output_logger(console_output, "prefix", logging.DEBUG)
mock_logger.log.assert_called_with(logging.INFO, "prefix execution completed successfully!")
@patch("shared.logging.logger")
def test_shell_output_logger_normal_case(mock_logger):
console_output = "Some logs"
shell_output_logger(console_output, "prefix", logging.DEBUG)
mock_logger.log.assert_called_with(logging.DEBUG, "prefix Some logs")
|
AzureTRE/resource_processor/tests_rp/test_logging.py/0
|
{
"file_path": "AzureTRE/resource_processor/tests_rp/test_logging.py",
"repo_id": "AzureTRE",
"token_count": 444
}
| 117 |
resource "azurerm_service_plan" "notifier_plan" {
name = "airlock-notifier-plan-${var.tre_id}"
resource_group_name = data.azurerm_resource_group.core.name
location = data.azurerm_resource_group.core.location
sku_name = "WS1"
os_type = "Windows"
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_servicebus_queue" "notifications_queue" {
name = "notifications"
namespace_id = data.azurerm_servicebus_namespace.core.id
enable_partitioning = false
}
/* The notification queue needs to be subscribed to the notification event-grid */
resource "azurerm_eventgrid_event_subscription" "airlock_notification" {
name = local.airlock_notification_eventgrid_subscription_name
scope = data.azurerm_eventgrid_topic.airlock_notification.id
service_bus_queue_endpoint_id = azurerm_servicebus_queue.notifications_queue.id
delivery_identity {
type = "SystemAssigned"
}
}
resource "azurerm_logic_app_standard" "logic_app" {
name = "airlock-notifier-app-${var.tre_id}"
location = data.azurerm_resource_group.core.location
resource_group_name = data.azurerm_resource_group.core.name
app_service_plan_id = azurerm_service_plan.notifier_plan.id
storage_account_name = data.azurerm_storage_account.storage.name
storage_account_access_key = data.azurerm_storage_account.storage.primary_access_key
virtual_network_subnet_id = data.azurerm_subnet.airlock_notification.id
app_settings = {
"FUNCTIONS_WORKER_RUNTIME" = "node"
"WEBSITE_NODE_DEFAULT_VERSION" = "~12"
"serviceBus_connectionString" = data.azurerm_servicebus_namespace.core.default_primary_connection_string
"subscription" = data.azurerm_subscription.current.subscription_id
"resource_group" = data.azurerm_resource_group.core.name
"smtp_server_address" = var.smtp_server_address
"smtp_server_port" = var.smtp_server_port
"smtp_server_enable_ssl" = var.smtp_server_enable_ssl
"smtp_username" = var.smtp_username
"smtp_password" = var.smtp_password
"smtp_from_email" = var.smtp_from_email
"tre_url" = var.tre_url != "" ? var.tre_url : local.default_tre_url
"APPLICATIONINSIGHTS_CONNECTION_STRING" = data.azurerm_application_insights.core.connection_string
}
site_config {
ftps_state = "Disabled"
vnet_route_all_enabled = true
elastic_instance_minimum = 1
}
identity {
type = "SystemAssigned"
}
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/shared_services/airlock_notifier/terraform/airlock_notifier.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/airlock_notifier.tf",
"repo_id": "AzureTRE",
"token_count": 1292
}
| 118 |
#!/bin/bash
set -e
script_dir=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
while [ "$1" != "" ]; do
case $1 in
--storage_account_name)
shift
storage_account_name=$1
;;
--fqdn)
shift
fqdn=$1
;;
--keyvault_name)
shift
keyvault_name=$1
;;
--resource_group_name)
shift
resource_group_name=$1
;;
--application_gateway_name)
shift
application_gateway_name=$1
;;
--cert_name)
shift
cert_name=$1
;;
*)
echo "Unexpected argument: '$1'"
usage
;;
esac
if [[ -z "$2" ]]; then
# if no more args then stop processing
break
fi
shift # remove the current value for `$1` and use the next
done
# done with processing args and can set this
set -o nounset
echo "Checking for index.html file in storage account"
# Create the default index.html page
cat << EOF > index.html
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml"><head><meta charset="utf-8"/><title></title></head><body></body></html>
EOF
# shellcheck disable=SC2016
indexExists=$(az storage blob list -o json \
--account-name "${storage_account_name}" \
--auth-mode login \
--container-name '$web' \
--query "[?name=='index.html'].name" \
| jq 'length')
if [[ ${indexExists} -lt 1 ]]; then
echo "No existing file found. Uploading index.html file"
# shellcheck disable=SC2016
az storage blob upload \
--account-name "${storage_account_name}" \
--auth-mode login \
--container-name '$web' \
--file index.html \
--name index.html \
--no-progress \
--only-show-errors
# Wait a bit for the App Gateway health probe to notice
echo "Waiting 30s for app gateway health probe"
sleep 30s
else
echo "index.html already present"
fi
ledir="${script_dir}/../letsencrypt"
mkdir -p "${ledir}/logs"
# Initiate the ACME challenge
echo "Initiating ACME challenge"
export STORAGE_ACCOUNT_NAME="${storage_account_name}"
/opt/certbot/bin/certbot certonly \
--config-dir "${ledir}" \
--work-dir "${ledir}" \
--logs-dir "${ledir}"/logs \
--manual \
--preferred-challenges=http \
--manual-auth-hook "${script_dir}"/auth-hook.sh \
--manual-cleanup-hook "${script_dir}"/cleanup-hook.sh \
--domain "${fqdn}" \
--non-interactive \
--agree-tos \
--register-unsafely-without-email
# Convert the generated certificate to a .pfx
echo "Got cert. Converting to PFX"
CERT_DIR="${ledir}/live/${fqdn}"
CERT_PASSWORD=$(openssl rand -base64 30)
openssl pkcs12 -export \
-inkey "${CERT_DIR}/privkey.pem" \
-in "${CERT_DIR}/fullchain.pem" \
-out "${CERT_DIR}/aci.pfx" \
-passout "pass:${CERT_PASSWORD}"
# Save cert and password to KeyVault
echo "Importing cert to KeyVault ${keyvault_name}"
sid=$(az keyvault certificate import \
-o json \
--vault-name "${keyvault_name}" \
--name "${cert_name}" \
--file "${CERT_DIR}/aci.pfx" \
--password "${CERT_PASSWORD}" \
| jq -r '.sid')
echo "Saving certificate password to KV with key ${cert_name}-password"
az keyvault secret set --name "${cert_name}"-password \
--vault-name "${keyvault_name}" \
--value "${CERT_PASSWORD}"
echo "Updating SSL cert in app gateway"
az network application-gateway ssl-cert update \
--resource-group "${resource_group_name}" \
--gateway-name "${application_gateway_name}" \
--name 'cert-primary' \
--key-vault-secret-id "${sid}"
|
AzureTRE/templates/shared_services/certs/scripts/letsencrypt.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/certs/scripts/letsencrypt.sh",
"repo_id": "AzureTRE",
"token_count": 1516
}
| 119 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.5.0"
constraints = "3.5.0"
hashes = [
"h1:T4XsCHDT839VehWKdxbVsLn0ECjcQaUTzbSGW055pgM=",
"zh:0d8ae6d6e87f44ed4a178be03d6466339b0bb578ab54c2677e365a8281b0bb7d",
"zh:29d250d1a18d49652b28f234ecd17687b36c875dc47877a678e587d5d136b054",
"zh:2e69ba373cf009e8a60b36d04f3dbc4638708d1bf88be9f96b3e52cbf8f47f31",
"zh:53d525dd84ac63b5f749bfbc6b70a202dacf29597664d2ab1165efea6f24f630",
"zh:a25024d574ccd5ae6c2962f3bb71d510f62899f493b1ed096f2f7f0e2b18f975",
"zh:aabc64fe64319b95aaba1d1866f87abc7b10adae37d2eafa2f85f37317fdd49f",
"zh:acc6a977814897cb23d3b3753213281334238f8bce6d2b21e9f04fc4087ee980",
"zh:b24987e9416c39cd59c0fa41c139a97406b9955f0607fcafbf3315014456338a",
"zh:c550eae45fd32acdbe32b4e5c450ae95df6cb18903ac7216b1b07b23a16ce045",
"zh:c8f83b763b643893dcb6933a6bcee824cb514e06e7e5c5f5ac4ba187e66d7e22",
"zh:dcdac07e7ea18464dea729717870c275de9453775243c231e1fb305cad0ee597",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.4.2"
constraints = "3.4.2"
hashes = [
"h1:PIIfeOjmPoQRHfMM7MDr7qY3mQqD4F+38Dmq8pjvUUs=",
"zh:1e61d226778aefd01c0e139c0ad709b61e9ae4b33d72301b922bd3d000b76eee",
"zh:3c3295c3d2e9c3f9d60d557ee8faf2a30bd15f59f2c38ed13f50a3220dd027d0",
"zh:6661b4953b875857c3ac99fb1006daf314acebf2d1748045d208ebc8cbc647cd",
"zh:6e1823a349ceea5e4e0c684561473f57c46f73d7c197c39904d031ce6654bfb8",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8f8e6fd15e5228f1935c63d79bf3074f645ddba1350756acfc968b2a05bf85ee",
"zh:939a78da13a7932bd5429f0c77debe907bf9d6c6a26af50fd4d9f32ee16ea5a6",
"zh:995a592acbcde12f0d34ff5c3b74ec7054743315684b72b927bdc0d33e0e7c4d",
"zh:a9f8b88fe365ed9996d3386b415cabb445cf9d6e4b0e0b73f58af3aa31f1fa3d",
"zh:dda7c698cf92170665ca3ac1ccdc2177c0bec4807e69075422ae9d5c5308adbd",
"zh:eff42af6313499db0b3177a82851e0f2d2706e81cab11372d7d3673c41b15b9c",
"zh:fcd6826d4398147314620401a5908dd35c6f2ebac7e7d3a7d77078dbc7c5a0e6",
]
}
|
AzureTRE/templates/shared_services/cyclecloud/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/shared_services/cyclecloud/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 1351
}
| 120 |
#!/bin/bash
# See remove_state.sh for the purpose of these scripts
echo "IMPORTING STATE FOR FIREWALL..."
# check for the existence of the RG. If it's not there it's because we're in CI and building from scratch - we can skip this script
set +e
RESOURCE_GROUP_ID="rg-${TRE_ID}"
az group show -n $RESOURCE_GROUP_ID
if [ $? -ne 0 ]; then
echo "RG not found, skipping import_state"
exit 0
fi
set -e
# Initialise state for Terraform
terraform init -input=false -backend=true -reconfigure -upgrade \
-backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name}" \
-backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name}" \
-backend-config="container_name=${TF_VAR_terraform_state_container_name}" \
-backend-config="key=${TRE_ID}-shared-service-firewall"
# Import a resource if it exists in Azure but doesn't exist in Terraform
tf_state_list="$(terraform state list)"
function import_if_exists() {
ADDRESS=$1
ID=$2
CMD=$3
# Check if the resource exists in Terraform
TF_RESOURCE_EXISTS=$(echo "$tf_state_list" | grep -q ^${ADDRESS}$; echo $?)
if [[ ${TF_RESOURCE_EXISTS} -eq 0 ]]; then
echo "${ADDRESS} already in TF State, ignoring..."
return
fi
# Some resources, e.g. Firewall rules and Diagnostics, don't show up in `az resource show`,
# so we need a way to set up a custom command for them
if [[ -z ${CMD} ]]; then
CMD="az resource show --ids ${ID}"
fi
${CMD} > /dev/null
AZ_RESOURCE_EXISTS=$?
# If resource exists in Terraform, it's already managed -- don't do anything
# If resource doesn't exist in Terraform and doesn't exist in Azure, it will be created -- don't do anything
# If resource doesn't exist in Terraform but exist in Azure, we need to import it
if [[ ${TF_RESOURCE_EXISTS} -ne 0 && ${AZ_RESOURCE_EXISTS} -eq 0 ]]; then
echo "IMPORTING ${ADDRESS} ${ID}"
terraform import -var "tre_id=${TRE_ID}" -var "location=${LOCATION}" ${ADDRESS} ${ID}
fi
}
import_if_exists azurerm_firewall.fw "/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}" || echo "Resource already exists"
# Firewall rules
import_if_exists azurerm_firewall_application_rule_collection.resource_processor_subnet \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-resource_processor_subnet" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-resource_processor_subnet"
import_if_exists azurerm_firewall_application_rule_collection.shared_subnet \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-shared_subnet" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-shared_subnet"
import_if_exists azurerm_firewall_application_rule_collection.web_app_subnet \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-web_app_subnet" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/applicationRuleCollections/arc-web_app_subnet"
import_if_exists azurerm_firewall_network_rule_collection.general \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/general" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/general"
import_if_exists azurerm_firewall_network_rule_collection.resource_processor_subnet \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/nrc-resource_processor_subnet" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/nrc-resource_processor_subnet"
import_if_exists azurerm_firewall_network_rule_collection.web_app_subnet \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/nrc-web_app_subnet" \
"az network firewall show --ids /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}/networkRuleCollections/nrc-web_app_subnet"
# Diagnostic settings
import_if_exists azurerm_monitor_diagnostic_setting.firewall \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/azureFirewalls/fw-${TRE_ID}|diagnostics-firewall-${TRE_ID}" \
"az monitor diagnostic-settings show --resource /subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/rg-${TRE_ID}/providers/microsoft.network/azureFirewalls/fw-${TRE_ID} --name diagnostics-firewall-${TRE_ID}"
import_if_exists azurerm_public_ip.fwpip "/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/publicIPAddresses/pip-fw-${TRE_ID}"
import_if_exists azurerm_subnet_route_table_association.rt_web_app_subnet_association \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/virtualNetworks/vnet-${TRE_ID}/subnets/WebAppSubnet"
# Route tables
import_if_exists azurerm_route_table.rt \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/routeTables/rt-${TRE_ID}"
import_if_exists azurerm_subnet_route_table_association.rt_shared_subnet_association \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/virtualNetworks/vnet-${TRE_ID}/subnets/SharedSubnet"
import_if_exists azurerm_subnet_route_table_association.rt_resource_processor_subnet_association \
"/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP_ID}/providers/Microsoft.Network/virtualNetworks/vnet-${TRE_ID}/subnets/ResourceProcessorSubnet"
|
AzureTRE/templates/shared_services/firewall/terraform/import_state.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/firewall/terraform/import_state.sh",
"repo_id": "AzureTRE",
"token_count": 2344
}
| 121 |
data "azurerm_log_analytics_workspace" "tre" {
name = "log-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "azurerm_service_plan" "core" {
name = "plan-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "azurerm_subnet" "shared" {
resource_group_name = local.core_resource_group_name
virtual_network_name = local.core_vnet
name = "SharedSubnet"
}
data "azurerm_subnet" "web_app" {
resource_group_name = local.core_resource_group_name
virtual_network_name = local.core_vnet
name = "WebAppSubnet"
}
data "azurerm_private_dns_zone" "mysql" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.mysql.database.azure.com"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "azurewebsites" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_storage_account" "gitea" {
name = local.storage_account_name
resource_group_name = local.core_resource_group_name
}
data "local_file" "version" {
filename = "${path.module}/../docker/version.txt"
}
data "azurerm_container_registry" "mgmt_acr" {
name = var.acr_name
resource_group_name = var.mgmt_resource_group_name
}
data "azurerm_key_vault" "keyvault" {
name = local.keyvault_name
resource_group_name = local.core_resource_group_name
}
data "azurerm_resource_group" "rg" {
name = local.core_resource_group_name
}
data "azurerm_monitor_diagnostic_categories" "webapp" {
resource_id = data.azurerm_service_plan.core.id
depends_on = [
azurerm_linux_web_app.gitea,
]
}
|
AzureTRE/templates/shared_services/gitea/terraform/data.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/gitea/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 790
}
| 122 |
#!/bin/bash
docker_pull_timeout=10
while true; do
if [ $docker_pull_timeout == 0 ]; then
echo 'ERROR - Timeout while waiting for sonatype/nexus3 to be pulled from Docker Hub'
exit 1
fi
if docker pull sonatype/nexus3; then
echo "Image pulled successfully"
break
else
echo "Failed to pull image, restarting Docker service"
systemctl restart docker.service
sleep 60
fi
((docker_pull_timeout--));
done
docker run -d -p 80:8081 -p 443:8443 -p 8083:8083 -v /etc/nexus-data:/nexus-data \
--restart always \
--name nexus \
--log-driver local \
sonatype/nexus3
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/deploy_nexus_container.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/deploy_nexus_container.sh",
"repo_id": "AzureTRE",
"token_count": 234
}
| 123 |
#!/bin/bash
set -o pipefail
set -o nounset
# set -o xtrace
if [ -z "$1" ]
then
echo 'New password to set needs to be passed as argument'
exit 1
fi
# Get the current password so we can post to the API
# (this is created in /nexus-data mounted volume as part of Nexus container start-up)
password_timeout=300
echo 'Checking for Nexus admin password file...'
while [ ! -f /etc/nexus-data/admin.password ]; do
# We must first wait for the file to be created
if [ $password_timeout == 0 ]; then
echo 'ERROR - Timeout while waiting for nexus-data/admin.password to be created'
exit 1
fi
sleep 1
((password_timeout--))
done
current_password=$(cat /etc/nexus-data/admin.password)
# Set own admin password so we can connect to repository manager later on using TF KV secret
reset_timeout=300
echo "Nexus default admin password found ($current_password). Resetting..."
res=1
while test "$res" != "0"; do
curl -ifu admin:"$current_password" -XPUT -H 'Content-Type:text/plain' --data "$1" \
http://localhost/service/rest/v1/security/users/admin/change-password
res=$?
echo "Attempt to reset password finished with code $res"
if test "$res" == "0"; then
echo 'Password reset successfully.'
else
if [ $reset_timeout == 0 ]; then
echo 'ERROR - Timeout while trying to reset Nexus admin password'
exit 1
fi
sleep 5
    ((reset_timeout-=5))
fi
done
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/reset_nexus_password.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/reset_nexus_password.sh",
"repo_id": "AzureTRE",
"token_count": 450
}
| 124 |
---
schemaVersion: 1.0.0
name: tre-service-azureml
version: 0.8.10
description: "An Azure TRE service for Azure Machine Learning"
registry: azuretre
dockerfile: Dockerfile.tmpl
credentials:
# Credentials for interacting with the AAD Auth tenant
- name: auth_client_id
env: AUTH_CLIENT_ID
- name: auth_client_secret
env: AUTH_CLIENT_SECRET
- name: auth_tenant_id
env: AUTH_TENANT_ID
# Credentials for interacting with Azure
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: id
type: string
description: "Resource ID"
- name: display_name
description: "Display name for the workspace"
- name: description
description: "Description for the workspace"
- name: is_exposed_externally
type: boolean
default: false
env: IS_EXPOSED_EXTERNALLY
description: "Determines if the AML workspace will be available over public/internet"
- name: address_space
type: string
description: "Address space for the AML subnets"
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
- name: azure_environment
env: AZURE_ENVIRONMENT
outputs:
- name: azureml_workspace_name
type: string
applyTo:
- install
- upgrade
- name: azureml_acr_id
type: string
applyTo:
- install
- upgrade
- name: azureml_storage_account_id
type: string
applyTo:
- install
- upgrade
- name: connection_uri
type: string
applyTo:
- install
- upgrade
- name: workspace_address_spaces
type: string
applyTo:
- install
- upgrade
- name: aml_subnet_address_prefixes
type: string
applyTo:
- install
- upgrade
- name: storage_tag
type: string
applyTo:
- install
- upgrade
- name: batch_tag
type: string
applyTo:
- install
- upgrade
- name: mcr_tag
type: string
applyTo:
- install
- upgrade
- name: aml_fqdn
type: string
applyTo:
- install
- upgrade
mixins:
- terraform:
clientVersion: 1.3.6
- az:
clientVersion: 2.37.0
install:
- terraform:
description: "Deploy Azure ML Service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
display_name: ${ bundle.parameters.display_name }
description: ${ bundle.parameters.description }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_tenant_id: ${ bundle.credentials.azure_tenant_id }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
arm_environment: ${ bundle.parameters.arm_environment }
azure_environment: ${ bundle.parameters.azure_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-service-azureml-${ bundle.parameters.id }
outputs:
- name: azureml_workspace_name
- name: azureml_acr_id
- name: azureml_storage_account_id
- name: connection_uri
- name: workspace_address_spaces
- name: aml_subnet_address_prefixes
- name: storage_tag
- name: batch_tag
- name: mcr_tag
- name: aml_fqdn
upgrade:
- terraform:
description: "Upgrade Azure ML Service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
display_name: ${ bundle.parameters.display_name }
description: ${ bundle.parameters.description }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_tenant_id: ${ bundle.credentials.azure_tenant_id }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
arm_environment: ${ bundle.parameters.arm_environment }
azure_environment: ${ bundle.parameters.azure_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-service-azureml-${ bundle.parameters.id }
outputs:
- name: azureml_workspace_name
- name: azureml_acr_id
- name: azureml_storage_account_id
- name: connection_uri
- name: workspace_address_spaces
- name: aml_subnet_address_prefixes
- name: storage_tag
- name: batch_tag
- name: mcr_tag
- name: aml_fqdn
uninstall:
- terraform:
description: "Delete the Azure ML Service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
display_name: ${ bundle.parameters.display_name }
description: ${ bundle.parameters.description }
address_space: ${ bundle.parameters.address_space }
is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
arm_tenant_id: ${ bundle.credentials.azure_tenant_id }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
arm_environment: ${ bundle.parameters.arm_environment }
azure_environment: ${ bundle.parameters.azure_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-service-azureml-${ bundle.parameters.id }
|
AzureTRE/templates/workspace_services/azureml/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 2888
}
| 125 |
Dockerfile.tmpl
|
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/.dockerignore/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/.dockerignore",
"repo_id": "AzureTRE",
"token_count": 6
}
| 126 |
# Contents
## porter.yaml
This is the porter manifest. See <https://porter.sh/author-bundles/> for
details on every field and how to configure your bundle. This is a required
file.
## helpers.sh
This is a bash script where you can place helper functions that you can call
from your porter.yaml file.
## README.md
This explains the files created by `porter create`. It is not used by porter and
can be deleted.
## Dockerfile.tmpl
This is a template Dockerfile for the bundle's invocation image. You can
customize it to use different base images, install tools and copy configuration
files. Porter will use it as a template and append lines to it for the mixin and to set
the CMD appropriately for the CNAB specification. You can delete this file if you don't
need it.
Add the following line to **porter.yaml** to enable the Dockerfile template:
```yaml
dockerfile: Dockerfile.tmpl
```
By default, the Dockerfile template is disabled and Porter automatically copies
all of the files in the current directory into the bundle's invocation image. When
you use a custom Dockerfile template, you must manually copy files into the bundle
using COPY statements in the Dockerfile template.
## .gitignore
This is a default file that we provide to help remind you which files are
generated by Porter, and shouldn't be committed to source control. You can
delete it if you don't need it.
## .dockerignore
This is a default file that controls which files are copied into the bundle's
invocation image by default. You can delete it if you don't need it.
|
AzureTRE/templates/workspace_services/databricks/README.md/0
|
{
"file_path": "AzureTRE/templates/workspace_services/databricks/README.md",
"repo_id": "AzureTRE",
"token_count": 394
}
| 127 |
output "gitea_fqdn" {
value = azurerm_linux_web_app.gitea.default_hostname
}
output "authentication_callback_uri" {
value = "https://${azurerm_linux_web_app.gitea.default_hostname}/user/oauth2/oidc/callback"
}
output "connection_uri" {
value = "https://${azurerm_linux_web_app.gitea.default_hostname}/"
}
output "workspace_address_space" {
value = jsonencode(data.azurerm_virtual_network.ws.address_space)
}
output "is_exposed_externally" {
value = false
}
|
AzureTRE/templates/workspace_services/gitea/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 186
}
| 128 |
#!/usr/bin/env sh
echo >&2 "sshd exited. code=${1}"
# terminate other services to exit from the container
exec s6-svscanctl -t /var/run/s6/services
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/sshd/finish/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/sshd/finish",
"repo_id": "AzureTRE",
"token_count": 53
}
| 129 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.guacamole.auth.azuretre.user;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.auth.azuretre.AzureTREAuthenticationProvider;
import org.apache.guacamole.net.auth.AbstractUserContext;
import org.apache.guacamole.net.auth.AuthenticationProvider;
import org.apache.guacamole.net.auth.Connection;
import org.apache.guacamole.net.auth.Directory;
import org.apache.guacamole.net.auth.User;
import org.apache.guacamole.net.auth.permission.ObjectPermissionSet;
import org.apache.guacamole.net.auth.simple.SimpleDirectory;
import org.apache.guacamole.net.auth.simple.SimpleObjectPermissionSet;
import org.apache.guacamole.net.auth.simple.SimpleUser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Map;
public class TreUserContext extends AbstractUserContext {
private static final Logger LOGGER = LoggerFactory.getLogger(TreUserContext.class);
private final AuthenticationProvider authProvider;
private final Directory<Connection> connectionDirectory;
private User self;
public TreUserContext(final AuthenticationProvider authProvider, Map<String, Connection> connections) {
LOGGER.debug("Creating a new tre user context.");
this.authProvider = authProvider;
this.connectionDirectory = new SimpleDirectory(connections);
}
public void init(final AzureTREAuthenticatedUser user) throws GuacamoleException {
self = new SimpleUser(user.getIdentifier()) {
@Override
public ObjectPermissionSet getConnectionPermissions() throws GuacamoleException {
return new SimpleObjectPermissionSet(connectionDirectory.getIdentifiers());
}
@Override
public ObjectPermissionSet getConnectionGroupPermissions() {
return new SimpleObjectPermissionSet(
Collections.singleton(AzureTREAuthenticationProvider.ROOT_CONNECTION_GROUP));
}
};
}
@Override
public User self() {
return self;
}
@Override
public AuthenticationProvider getAuthenticationProvider() {
LOGGER.debug("getAuthenticationProvider");
return authProvider;
}
public Object getResource() throws GuacamoleException {
return null;
}
public Directory<Connection> getConnectionDirectory() throws GuacamoleException {
LOGGER.debug("getConnectionDirectory");
return this.connectionDirectory;
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/user/TreUserContext.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/user/TreUserContext.java",
"repo_id": "AzureTRE",
"token_count": 1067
}
| 130 |
data "azurerm_client_config" "current" {}
data "azurerm_resource_group" "ws" {
name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_virtual_network" "ws" {
name = "vnet-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_key_vault" "ws" {
name = local.keyvault_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_key_vault_secret" "aad_tenant_id" {
name = "auth-tenant-id"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_key_vault_secret" "workspace_client_id" {
name = "workspace-client-id"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_key_vault_secret" "workspace_client_secret" {
name = "workspace-client-secret"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_subnet" "web_apps" {
name = "WebAppsSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_virtual_network.ws.resource_group_name
}
data "azurerm_subnet" "services" {
name = "ServicesSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_private_dns_zone" "azurewebsites" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_container_registry" "mgmt_acr" {
name = var.mgmt_acr_name
resource_group_name = var.mgmt_resource_group_name
}
data "azurerm_log_analytics_workspace" "tre" {
name = "log-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "local_file" "version" {
filename = "${path.module}/../guacamole-server/docker/version.txt"
}
data "azurerm_application_insights" "ws" {
name = "appi-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_monitor_diagnostic_categories" "guacamole" {
resource_id = azurerm_linux_web_app.guacamole.id
depends_on = [
azurerm_linux_web_app.guacamole,
]
}
|
AzureTRE/templates/workspace_services/guacamole/terraform/data.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 1029
}
| 131 |
# GUID to identify the workspace service
ID=__CHANGE_ME__
# GUID to identify the workspace bundle
WORKSPACE_ID="__CHANGE_ME__"
# Service principal client ID & secret used by the inference service to connect to Azure ML
INFERENCE_SP_CLIENT_ID=__CHANGE_ME__
INFERENCE_SP_CLIENT_SECRET=__CHANGE_ME__
|
AzureTRE/templates/workspace_services/innereye/.env.sample/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/.env.sample",
"repo_id": "AzureTRE",
"token_count": 98
}
| 132 |
variable "workspace_id" {
type = string
}
variable "tre_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "arm_tenant_id" {
type = string
}
variable "arm_client_id" {
type = string
}
variable "arm_client_secret" {
type = string
}
variable "arm_use_msi" {
type = bool
}
variable "inference_sp_client_id" {
type = string
}
variable "inference_sp_client_secret" {
type = string
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/workspace_services/innereye/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 173
}
| 133 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.18.0"
constraints = "3.18.0"
hashes = [
"h1:JP1ql3IvCpG1f88Zfb+W0Gm9kRKHg2c+VXOfVKpHZTY=",
"zh:038d308618653e999070f437d42c009d191abbd5c585deff3217ad46676d213e",
"zh:7377c1e66d143db3c8a4a24bb45956dd71fb75c4a62863ff6a372ea66bd1501a",
"zh:8219107a8d8482691c23b6a96a97232741b38b80d0ad195af31c0f6de85f457e",
"zh:9f497110c35615917f772331abeaaa3a3e694f7f300e79dd1886259601f4d159",
"zh:a5137efd8fc2c84cfc4b865bb55c166d7c9370ff606962d60b5cded787af253d",
"zh:a554206ac0f7f859fb5a9a9b42c80903d3ca3ea13082cc6e5f79f672df9efa89",
"zh:bda8971b09fb271c58f13cc97bbd50055a70bab35c9ec6471cd8b7c4c7613767",
"zh:d831b429fd4376f609898c82154e832abd1281e4ccb72a44e7560ccbb21cbeb1",
"zh:e45d58c341e7b58c82e19fbb2517e6fdd7cac111c776968ad03a3cf0882994da",
"zh:ed613a7237c031f3b93e1fa659f1d80b5774d845f25d86eb87560dba44762dd5",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fd486a50adcb424499e2b7b980faaf910337f913adf08b158b6c0ce3cb015c8f",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.4.2"
constraints = "3.4.2"
hashes = [
"h1:PIIfeOjmPoQRHfMM7MDr7qY3mQqD4F+38Dmq8pjvUUs=",
"zh:1e61d226778aefd01c0e139c0ad709b61e9ae4b33d72301b922bd3d000b76eee",
"zh:3c3295c3d2e9c3f9d60d557ee8faf2a30bd15f59f2c38ed13f50a3220dd027d0",
"zh:6661b4953b875857c3ac99fb1006daf314acebf2d1748045d208ebc8cbc647cd",
"zh:6e1823a349ceea5e4e0c684561473f57c46f73d7c197c39904d031ce6654bfb8",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8f8e6fd15e5228f1935c63d79bf3074f645ddba1350756acfc968b2a05bf85ee",
"zh:939a78da13a7932bd5429f0c77debe907bf9d6c6a26af50fd4d9f32ee16ea5a6",
"zh:995a592acbcde12f0d34ff5c3b74ec7054743315684b72b927bdc0d33e0e7c4d",
"zh:a9f8b88fe365ed9996d3386b415cabb445cf9d6e4b0e0b73f58af3aa31f1fa3d",
"zh:dda7c698cf92170665ca3ac1ccdc2177c0bec4807e69075422ae9d5c5308adbd",
"zh:eff42af6313499db0b3177a82851e0f2d2706e81cab11372d7d3673c41b15b9c",
"zh:fcd6826d4398147314620401a5908dd35c6f2ebac7e7d3a7d77078dbc7c5a0e6",
]
}
|
AzureTRE/templates/workspace_services/mysql/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mysql/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 1359
}
| 134 |
CREATE ROLE ${OHDSI_ADMIN_ROLE} CREATEDB REPLICATION VALID UNTIL 'infinity';
COMMENT ON ROLE ${OHDSI_ADMIN_ROLE} IS 'Administration group for OHDSI applications';
CREATE ROLE ${OHDSI_APP_ROLE} VALID UNTIL 'infinity';
COMMENT ON ROLE ${OHDSI_APP_ROLE} IS 'Application group for OHDSI applications';
CREATE ROLE ${OHDSI_ADMIN_USERNAME} LOGIN ENCRYPTED PASSWORD ${admin_md5} VALID UNTIL 'infinity';
GRANT ${OHDSI_ADMIN_ROLE} TO ${OHDSI_ADMIN_USERNAME};
COMMENT ON ROLE ${OHDSI_ADMIN_USERNAME} IS 'Admin user account for OHDSI applications';
CREATE ROLE ${OHDSI_APP_USERNAME} LOGIN ENCRYPTED PASSWORD ${app_md5} VALID UNTIL 'infinity';
GRANT ${OHDSI_APP_ROLE} TO ${OHDSI_APP_USERNAME};
COMMENT ON ROLE ${OHDSI_APP_USERNAME} IS 'Application user account for OHDSI applications';
GRANT ALL ON DATABASE ${DATABASE_NAME} TO GROUP ${OHDSI_ADMIN_ROLE};
GRANT CONNECT, TEMPORARY ON DATABASE ${DATABASE_NAME} TO GROUP ${OHDSI_APP_ROLE};
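-- Illustrative expansion (hedged; the values below are example substitutions, not real deployment values):
-- with OHDSI_APP_ROLE=ohdsi_app and OHDSI_APP_USERNAME=ohdsi_app_user, the role/grant pattern above becomes:
--   CREATE ROLE ohdsi_app_user LOGIN ENCRYPTED PASSWORD '<md5-hash>' VALID UNTIL 'infinity';
--   GRANT ohdsi_app TO ohdsi_app_user;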
|
AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_roles_users.sql/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/sql/atlas_create_roles_users.sql",
"repo_id": "AzureTRE",
"token_count": 371
}
| 135 |
# The API needs permissions to stop/start VMs
data "azurerm_user_assigned_identity" "api_id" {
name = "id-api-${var.tre_id}"
resource_group_name = "rg-${var.tre_id}"
}
# TODO: the assigned built-in role grants overly broad permissions.
# https://github.com/microsoft/AzureTRE/issues/2389
resource "azurerm_role_assignment" "api_vm_contributor" {
scope = azurerm_resource_group.ws.id
role_definition_name = "Virtual Machine Contributor"
principal_id = data.azurerm_user_assigned_identity.api_id.principal_id
}
# Needed to include untagged resources in cost reporting #2933
resource "azurerm_role_assignment" "api_reader" {
scope = azurerm_resource_group.ws.id
role_definition_name = "Reader"
principal_id = data.azurerm_user_assigned_identity.api_id.principal_id
}
|
AzureTRE/templates/workspaces/base/terraform/api-permissions.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/api-permissions.tf",
"repo_id": "AzureTRE",
"token_count": 339
}
| 136 |
import React, { useContext } from 'react';
import { Workspace } from '../../models/workspace';
import { ResourceCardList } from '../shared/ResourceCardList';
import { Resource } from '../../models/resource';
import { PrimaryButton, Stack } from '@fluentui/react';
import { ResourceType } from '../../models/resourceType';
import { CreateUpdateResourceContext } from '../../contexts/CreateUpdateResourceContext';
import { RoleName } from '../../models/roleNames';
import { SecuredByRole } from '../shared/SecuredByRole';
interface RootDashboardProps {
selectWorkspace?: (workspace: Workspace) => void,
workspaces: Array<Workspace>,
updateWorkspace: (w: Workspace) => void,
removeWorkspace: (w: Workspace) => void,
addWorkspace: (w: Workspace) => void
}
export const RootDashboard: React.FunctionComponent<RootDashboardProps> = (props: RootDashboardProps) => {
const createFormCtx = useContext(CreateUpdateResourceContext);
return (
<>
<Stack className="tre-panel">
<Stack.Item>
<Stack horizontal horizontalAlign="space-between">
<Stack.Item><h1>Workspaces</h1></Stack.Item>
<Stack.Item style={{ width: 200, textAlign: 'right' }}>
<SecuredByRole allowedAppRoles={[RoleName.TREAdmin]} element={
<PrimaryButton iconProps={{ iconName: 'Add' }} text="Create new" onClick={() => {
createFormCtx.openCreateForm({
resourceType: ResourceType.Workspace,
onAdd: (r: Resource) => props.addWorkspace(r as Workspace)
})
}} />
} />
</Stack.Item>
</Stack>
</Stack.Item>
<Stack.Item>
<ResourceCardList
resources={props.workspaces}
updateResource={(r: Resource) => props.updateWorkspace(r as Workspace)}
removeResource={(r: Resource) => props.removeWorkspace(r as Workspace)}
emptyText="No workspaces to display. Create one to get started." />
</Stack.Item>
</Stack>
</>
);
};
|
AzureTRE/ui/app/src/components/root/RootDashboard.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/root/RootDashboard.tsx",
"repo_id": "AzureTRE",
"token_count": 832
}
| 137 |
import React, { useContext, useEffect, useState } from 'react';
import { ComponentAction, VMPowerStates, Resource } from '../../models/resource';
import { CommandBar, IconButton, IContextualMenuItem, IContextualMenuProps } from '@fluentui/react';
import { RoleName, WorkspaceRoleName } from '../../models/roleNames';
import { SecuredByRole } from './SecuredByRole';
import { ResourceType } from '../../models/resourceType';
import { HttpMethod, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { UserResource } from '../../models/userResource';
import { getActionIcon, ResourceTemplate, TemplateAction } from '../../models/resourceTemplate';
import { ConfirmDeleteResource } from './ConfirmDeleteResource';
import { ConfirmCopyUrlToClipboard } from './ConfirmCopyUrlToClipboard';
import { ConfirmDisableEnableResource } from './ConfirmDisableEnableResource';
import { CreateUpdateResourceContext } from '../../contexts/CreateUpdateResourceContext';
import { Workspace } from '../../models/workspace';
import { WorkspaceService } from '../../models/workspaceService';
import { actionsDisabledStates } from '../../models/operation';
import { AppRolesContext } from '../../contexts/AppRolesContext';
import { useAppDispatch } from '../../hooks/customReduxHooks';
import { addUpdateOperation } from '../shared/notifications/operationsSlice';
import { ConfirmUpgradeResource } from './ConfirmUpgradeResource';
interface ResourceContextMenuProps {
resource: Resource,
componentAction: ComponentAction,
commandBar?: boolean
}
export const ResourceContextMenu: React.FunctionComponent<ResourceContextMenuProps> = (props: ResourceContextMenuProps) => {
const apiCall = useAuthApiCall();
const workspaceCtx = useContext(WorkspaceContext);
const [showDisable, setShowDisable] = useState(false);
const [showDelete, setShowDelete] = useState(false);
const [showCopyUrl, setShowCopyUrl] = useState(false);
const [showUpgrade, setShowUpgrade] = useState(false);
const [resourceTemplate, setResourceTemplate] = useState({} as ResourceTemplate);
const createFormCtx = useContext(CreateUpdateResourceContext);
const [parentResource, setParentResource] = useState({} as WorkspaceService | Workspace);
const [roles, setRoles] = useState([] as Array<string>);
const appRoles = useContext(AppRolesContext); // the user is in these roles which apply across the app
const dispatch = useAppDispatch();
// get the resource template
useEffect(() => {
const getTemplate = async () => {
if (!props.resource || !props.resource.id) return;
let templatesPath;
switch (props.resource.resourceType) {
case ResourceType.Workspace:
templatesPath = ApiEndpoint.WorkspaceTemplates; break;
case ResourceType.WorkspaceService:
templatesPath = ApiEndpoint.WorkspaceServiceTemplates; break;
case ResourceType.SharedService:
templatesPath = ApiEndpoint.SharedServiceTemplates; break;
case ResourceType.UserResource:
const ur = props.resource as UserResource;
const parentService = (await apiCall(
`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}/${ur.parentWorkspaceServiceId}`,
HttpMethod.Get,
workspaceCtx.workspaceApplicationIdURI))
.workspaceService;
setParentResource(parentService);
templatesPath = `${ApiEndpoint.WorkspaceServiceTemplates}/${parentService.templateName}/${ApiEndpoint.UserResourceTemplates}`; break;
default:
throw Error('Unsupported resource type.');
}
let r = [] as Array<string>;
let wsAuth = false;
switch (props.resource.resourceType) {
case ResourceType.SharedService:
r = [RoleName.TREAdmin, WorkspaceRoleName.WorkspaceOwner];
break;
case ResourceType.WorkspaceService:
r = [WorkspaceRoleName.WorkspaceOwner]
wsAuth = true;
break;
case ResourceType.UserResource:
r = [WorkspaceRoleName.WorkspaceOwner, WorkspaceRoleName.WorkspaceResearcher, WorkspaceRoleName.AirlockManager];
wsAuth = true;
break;
case ResourceType.Workspace:
r = [RoleName.TREAdmin];
break;
}
setRoles(r);
// should we bother getting the template? if the user isn't in the right role they won't see the menu at all.
const userRoles = wsAuth ? workspaceCtx.roles : appRoles.roles;
if (userRoles && r.filter(x => userRoles.includes(x)).length > 0) {
const template = await apiCall(`${templatesPath}/${props.resource.templateName}`, HttpMethod.Get);
setResourceTemplate(template);
}
};
getTemplate();
}, [apiCall, props.resource, workspaceCtx, appRoles]);
const doAction = async (actionName: string) => {
const action = await apiCall(`${props.resource.resourcePath}/${ApiEndpoint.InvokeAction}?action=${actionName}`, HttpMethod.Post, workspaceCtx.workspaceApplicationIdURI);
action && action.operation && dispatch(addUpdateOperation(action.operation));
}
// context menu
let menuItems: Array<any> = [];
menuItems = [
{
key: 'update',
text: 'Update',
iconProps: { iconName: 'WindowEdit' },
onClick: () => createFormCtx.openCreateForm({
resourceType: props.resource.resourceType,
updateResource: props.resource,
resourceParent: parentResource,
workspaceApplicationIdURI: workspaceCtx.workspaceApplicationIdURI,
}),
disabled: (props.componentAction === ComponentAction.Lock)
},
{
key: 'disable',
text: props.resource.isEnabled ? 'Disable' : 'Enable',
iconProps: { iconName: props.resource.isEnabled ? 'CirclePause' : 'PlayResume' },
onClick: () => setShowDisable(true),
disabled: (props.componentAction === ComponentAction.Lock)
},
{
key: 'delete',
text: 'Delete',
title: props.resource.isEnabled ? 'Resource must be disabled before deleting' : 'Delete this resource',
iconProps: { iconName: 'Delete' },
onClick: () => setShowDelete(true),
disabled: (props.resource.isEnabled || props.componentAction === ComponentAction.Lock)
},
];
const shouldDisableConnect = () => {
return props.componentAction === ComponentAction.Lock
|| actionsDisabledStates.includes(props.resource.deploymentStatus)
|| !props.resource.isEnabled
|| (props.resource.azureStatus?.powerState && props.resource.azureStatus.powerState !== VMPowerStates.Running);
}
// add 'connect' button if we have a URL to connect to
if(props.resource.properties.connection_uri){
if (props.resource.properties.is_exposed_externally === true) {
menuItems.push({
key: 'connect',
text: 'Connect',
title: shouldDisableConnect() ? 'Resource must be deployed, enabled & powered on to connect' : 'Connect to resource',
iconProps: { iconName: 'PlugConnected' },
onClick: () => { window.open(props.resource.properties.connection_uri, '_blank') },
disabled: shouldDisableConnect()
})
}
else if (props.resource.properties.is_exposed_externally === false) {
menuItems.push({
key: 'connect',
text: 'Connect',
title: shouldDisableConnect() ? 'Resource must be deployed, enabled & powered on to connect' : 'Connect to resource',
iconProps: { iconName: 'PlugConnected' },
onClick: () => setShowCopyUrl(true),
disabled: shouldDisableConnect()
})
}
}
const shouldDisableActions = () => {
return props.componentAction === ComponentAction.Lock
|| actionsDisabledStates.includes(props.resource.deploymentStatus)
|| !props.resource.isEnabled;
}
// add custom actions if we have any
if (resourceTemplate && resourceTemplate.customActions && resourceTemplate.customActions.length > 0) {
let customActions: Array<IContextualMenuItem> = [];
resourceTemplate.customActions.forEach((a: TemplateAction) => {
customActions.push(
{
key: a.name,
text: a.name,
title: a.description,
iconProps: { iconName: getActionIcon(a.name) },
className: 'tre-context-menu',
onClick: () => { doAction(a.name) }
}
);
});
menuItems.push({
key: 'custom-actions',
text: 'Actions',
      title: shouldDisableActions() ? 'Resource must be deployed and enabled to perform actions' : 'Custom Actions',
iconProps: { iconName: 'Asterisk' },
disabled: shouldDisableActions(),
subMenuProps: { items: customActions }
});
}
// add 'upgrade' button if we have available template upgrades
const nonMajorUpgrades = props.resource.availableUpgrades?.filter(upgrade => !upgrade.forceUpdateRequired)
  if (nonMajorUpgrades && nonMajorUpgrades.length > 0) {
menuItems.push({
key: 'upgrade',
text: 'Upgrade',
title: 'Upgrade this resource template version',
iconProps: { iconName: 'Refresh' },
onClick: () => setShowUpgrade(true),
disabled: (props.componentAction === ComponentAction.Lock)
})
}
const menuProps: IContextualMenuProps = {
shouldFocusOnMount: true,
items: menuItems
};
return (
<>
<SecuredByRole allowedWorkspaceRoles={roles} allowedAppRoles={roles} element={
props.commandBar ?
<CommandBar
items={menuItems}
ariaLabel="Resource actions"
/>
:
<IconButton iconProps={{ iconName: 'More' }} menuProps={menuProps} className="tre-hide-chevron" disabled={props.componentAction === ComponentAction.Lock} />
} />
{
showDisable &&
<ConfirmDisableEnableResource onDismiss={() => setShowDisable(false)} resource={props.resource} isEnabled={!props.resource.isEnabled} />
}
{
showDelete &&
<ConfirmDeleteResource onDismiss={() => setShowDelete(false)} resource={props.resource} />
}
{
showCopyUrl &&
<ConfirmCopyUrlToClipboard onDismiss={() => setShowCopyUrl(false)} resource={props.resource} />
}
{
showUpgrade &&
<ConfirmUpgradeResource onDismiss={() => setShowUpgrade(false)} resource={props.resource} />
}
</>
)
};
|
AzureTRE/ui/app/src/components/shared/ResourceContextMenu.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceContextMenu.tsx",
"repo_id": "AzureTRE",
"token_count": 3761
}
| 138 |
import { DefaultButton, Dialog, DialogFooter, DocumentCard, DocumentCardDetails, DocumentCardPreview, DocumentCardTitle, DocumentCardType, getTheme, Icon, IDocumentCardPreviewProps, IStackTokens, Panel, PanelType, PrimaryButton, Spinner, SpinnerSize, Stack, TextField } from "@fluentui/react";
import { useCallback, useContext, useEffect, useState } from "react";
import { useNavigate } from "react-router-dom";
import { WorkspaceContext } from "../../../contexts/WorkspaceContext";
import { HttpMethod, useAuthApiCall } from "../../../hooks/useAuthApiCall";
import { AirlockRequest, AirlockRequestType, NewAirlockRequest } from "../../../models/airlock";
import { ApiEndpoint } from "../../../models/apiEndpoints";
import { APIError } from "../../../models/exceptions";
import { ExceptionLayout } from "../ExceptionLayout";
interface AirlockNewRequestProps {
onCreateRequest: (request: AirlockRequest) => void;
}
export const AirlockNewRequest: React.FunctionComponent<AirlockNewRequestProps> = (props: AirlockNewRequestProps) => {
const [newRequest, setNewRequest] = useState<NewAirlockRequest>({} as NewAirlockRequest);
const [requestValid, setRequestValid] = useState(false);
const [hideCreateDialog, setHideCreateDialog] = useState(true);
const [creating, setCreating] = useState(false);
const [createError, setCreateError] = useState(false);
const [apiCreateError, setApiSubmitError] = useState({} as APIError);
const navigate = useNavigate();
const workspaceCtx = useContext(WorkspaceContext);
const apiCall = useAuthApiCall();
  const onChangeTitle = useCallback(
(event: React.FormEvent<HTMLInputElement | HTMLTextAreaElement>, newValue?: string) => {
setNewRequest(request => {
return {
...request,
title: newValue || ''
}
});
},
[setNewRequest]
);
const onChangeBusinessJustification = useCallback(
(event: React.FormEvent<HTMLInputElement | HTMLTextAreaElement>, newValue?: string) => {
setNewRequest(request => {
return {
...request,
businessJustification: newValue || ''
}
});
},
[setNewRequest]
);
useEffect(
() => setRequestValid(
newRequest.title?.length > 0 &&
newRequest.businessJustification?.length > 0
),
[newRequest, setRequestValid]
);
// Submit Airlock request to API
const create = useCallback(async () => {
if (requestValid) {
setCreating(true);
setCreateError(false);
try {
const response = await apiCall(
`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.AirlockRequests}`,
HttpMethod.Post,
workspaceCtx.workspaceApplicationIdURI,
newRequest
);
props.onCreateRequest(response.airlockRequest);
setHideCreateDialog(true);
} catch (err: any) {
err.userMessage = 'Error submitting airlock request';
setApiSubmitError(err);
setCreateError(true);
}
setCreating(false);
}
}, [apiCall, newRequest, props, workspaceCtx, requestValid]);
const dismissPanel = useCallback(() => navigate('../'), [navigate]);
const renderFooter = useCallback(() => {
let footer = <></>
if (newRequest.type) {
footer = <>
<div style={{textAlign: 'end'}}>
<DefaultButton onClick={() => setNewRequest({} as NewAirlockRequest)} styles={{root:{marginRight: 8}}}>Back</DefaultButton>
<PrimaryButton onClick={() => setHideCreateDialog(false)} disabled={!requestValid}>Create</PrimaryButton>
</div>
</>
}
return footer;
}, [newRequest, setNewRequest, setHideCreateDialog, requestValid]);
let title: string;
let currentStep = <></>;
// Render current step depending on whether type has been selected
if (!newRequest.type) {
title = "New airlock request";
currentStep = <Stack style={{marginTop: '40px'}} tokens={stackTokens}>
<DocumentCard
aria-label="Import"
type={DocumentCardType.compact}
onClick={() => setNewRequest({ type: AirlockRequestType.Import } as NewAirlockRequest)}>
<DocumentCardPreview {...importPreviewGraphic} />
<DocumentCardDetails>
<DocumentCardTitle title="Import" styles={cardTitleStyles} />
<DocumentCardTitle
title="Import files into a workspace from outside of the TRE."
shouldTruncate
showAsSecondaryTitle
/>
</DocumentCardDetails>
</DocumentCard>
<DocumentCard
aria-label="Export"
type={DocumentCardType.compact}
onClick={() => setNewRequest({ type: AirlockRequestType.Export } as NewAirlockRequest)}>
<DocumentCardPreview {...exportPreviewGraphic} />
<DocumentCardDetails>
<DocumentCardTitle title="Export" styles={cardTitleStyles} />
<DocumentCardTitle
title="Export files from a workspace to the outside world."
shouldTruncate
showAsSecondaryTitle
/>
</DocumentCardDetails>
</DocumentCard>
</Stack>;
} else {
title = `New airlock ${newRequest.type} request`;
currentStep = <Stack style={{marginTop: '40px'}} tokens={stackTokens}>
<TextField
label="Title"
placeholder="Enter a request title."
value={newRequest.title}
        onChange={onChangeTitle}
rows={1}
required
/>
<TextField
label="Business Justification"
placeholder="Enter a justification for your request."
value={newRequest.businessJustification}
onChange={onChangeBusinessJustification}
multiline
rows={10}
required
/>
</Stack>;
}
return (
<Panel
headerText={title}
isOpen={true}
isLightDismiss={true}
onDismiss={dismissPanel}
onRenderFooterContent={renderFooter}
isFooterAtBottom={true}
closeButtonAriaLabel="Close"
type={PanelType.custom}
customWidth="450px"
>
<h4 style={{fontWeight: '400', marginTop: 5}}>
<Icon iconName="CubeShape" style={{ marginRight: '8px', fontSize: '22px', verticalAlign: 'bottom' }} />
{workspaceCtx.workspace?.properties?.display_name}
</h4>
{ currentStep }
<Dialog
hidden={hideCreateDialog}
onDismiss={() => setHideCreateDialog(true)}
dialogContentProps={{
title: 'Create request?',
subText: 'Are you sure you want to create this request?',
}}
>
{
createError && <ExceptionLayout e={apiCreateError} />
}
{
creating
? <Spinner label="Creating..." ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
: <DialogFooter>
<PrimaryButton onClick={create} text="Create" />
<DefaultButton onClick={() => setHideCreateDialog(true)} text="Cancel" />
</DialogFooter>
}
</Dialog>
</Panel>
)
}
const stackTokens: IStackTokens = { childrenGap: 20 };
const { palette, fonts } = getTheme();
const importPreviewGraphic: IDocumentCardPreviewProps = {
previewImages: [
{
previewIconProps: {
iconName: 'ReleaseGate',
styles: {
root: {
fontSize: fonts.superLarge.fontSize,
color: '#0078d7',
backgroundColor: palette.neutralLighterAlt,
},
},
},
width: 144,
},
],
styles: {
previewIcon: { backgroundColor: palette.neutralLighterAlt },
},
};
const exportPreviewGraphic: IDocumentCardPreviewProps = {
previewImages: [
{
previewIconProps: {
iconName: 'Leave',
styles: {
root: {
fontSize: fonts.superLarge.fontSize,
color: '#0078d7',
backgroundColor: palette.neutralLighterAlt,
},
},
},
width: 144,
},
],
styles: {
previewIcon: { backgroundColor: palette.neutralLighterAlt },
},
};
const cardTitleStyles = { root: { fontWeight: '600', paddingTop: 15 } };
|
AzureTRE/ui/app/src/components/shared/airlock/AirlockNewRequest.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/airlock/AirlockNewRequest.tsx",
"repo_id": "AzureTRE",
"token_count": 3274
}
| 139 |
import React, { useContext } from 'react';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { Resource } from '../../models/resource';
import { Workspace } from '../../models/workspace';
import { useComponentManager } from '../../hooks/useComponentManager';
import { ResourceHeader } from '../shared/ResourceHeader';
import { useNavigate } from 'react-router-dom';
import { ResourceBody } from '../shared/ResourceBody';
export const WorkspaceItem: React.FunctionComponent = () => {
const workspaceCtx = useContext(WorkspaceContext);
const navigate = useNavigate();
const latestUpdate = useComponentManager(
workspaceCtx.workspace,
(r: Resource) => workspaceCtx.setWorkspace(r as Workspace),
(r: Resource) => navigate(`/`)
);
return (
<>
<ResourceHeader resource={workspaceCtx.workspace} latestUpdate={latestUpdate}/>
<ResourceBody resource={workspaceCtx.workspace} />
</>
);
};
|
AzureTRE/ui/app/src/components/workspaces/WorkspaceItem.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/workspaces/WorkspaceItem.tsx",
"repo_id": "AzureTRE",
"token_count": 289
}
| 140 |
export interface CostResource {
id: string;
name: string;
costs: Array<CostItem>;
}
export interface CostItem {
cost: number,
currency: string,
date?: string
}
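// Illustrative shape only (hedged sketch; the values are made up, not real API output):
// const exampleCost: CostResource = {
//   id: "b2c8...",
//   name: "shared-service-firewall",
//   costs: [{ cost: 1.23, currency: "USD", date: "2023-01-01" }]
// };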
|
AzureTRE/ui/app/src/models/costs.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/costs.ts",
"repo_id": "AzureTRE",
"token_count": 56
}
| 141 |
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom';
|
AzureTRE/ui/app/src/setupTests.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/setupTests.ts",
"repo_id": "AzureTRE",
"token_count": 75
}
| 142 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import json
import re
data_dir=sys.argv[1]
def unify_ent2id(ent2id, method='max'):
id2ent = {}
for k, v in ent2id.items():
if v in id2ent:
if method == 'min':
id2ent[v] = k if len(k) < len(id2ent[v]) else id2ent[v]
else:
id2ent[v] = k if len(k) > len(id2ent[v]) else id2ent[v]
else:
id2ent[v] = k
ent2id = {v:k for k, v in id2ent.items()}
return ent2id, id2ent
def sort_triples(triples, text):
sorted_triples = sorted(triples, key=lambda x:text.find(x['chemical']))
return sorted_triples
def build_target_seq_svo(relations, id2chem, id2disease):
answer = ""
for z in relations:
chemical = id2chem[z["chemical"]]
disease = id2disease[z["disease"]]
answer += f"{chemical} correlates with {disease}; "
return answer[:-2] + "."
def build_target_seq_relis(relations, id2chem, id2disease):
answer = ""
for z in relations:
chemical = id2chem[z["chemical"]]
disease = id2disease[z["disease"]]
answer += f"the relation between {chemical} and {disease} exists; "
return answer[:-2] + "."
def loader(fname, fn):
ret = []
null_cnt = 0
suc_cnt = 0
null_flag = False
with open(fname, "r", encoding="utf8") as fr:
data = json.load(fr)
for pmid, v in data.items():
if re.search(r"\W$", v["title"]):
content = v["title"] + " " + v["abstract"]
else:
content = v["title"] + ". " + v["abstract"]
content = content.lower()
if v["relations"] is None or len(v["relations"]) == 0:
if not null_flag:
print(f"Following PMID in {fname} has no extracted relations:")
null_flag = True
print(f"{pmid} ", end="")
null_cnt += 1
else:
chemical2id = v["chemical2id"]
disease2id = v["disease2id"]
unified_chemical2id, id2chemical = unify_ent2id(chemical2id, method='max')
unified_disease2id, id2disease = unify_ent2id(disease2id, method='max')
answer = fn(v["relations"], id2chemical, id2disease)
ret.append((pmid, content, answer))
suc_cnt += 1
if null_flag:
print("")
print(f"{len(data)} samples in {fname} has been processed with {null_cnt} samples has no relations extracted.")
return ret
def dumper(content_list, prefix):
fw_pmid = open(prefix + ".pmid", "w")
fw_content = open(prefix + ".x", "w")
fw_label = open(prefix + ".y", "w")
for ele in content_list:
print(ele[0], file=fw_pmid)
print(ele[1], file=fw_content)
print(ele[2], file=fw_label)
fw_pmid.close()
fw_content.close()
fw_label.close()
def worker(fname, prefix, fn):
ret = loader(fname, fn)
dumper(ret, prefix)
for split in ['train', 'valid', 'test']:
worker(os.path.join(f"{data_dir}", f"{split}.json"), os.path.join(f"{data_dir}", f"relis_{split}"), build_target_seq_relis)
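# Illustrative example of the target-sequence format produced above (values are made up):
# for relations = [{"chemical": "c1", "disease": "d1"}] with id2chemical = {"c1": "aspirin"}
# and id2disease = {"d1": "nausea"},
#   build_target_seq_relis(...) returns "the relation between aspirin and nausea exists."
#   build_target_seq_svo(...)   returns "aspirin correlates with nausea."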
|
BioGPT/examples/RE-BC5CDR/rebuild_data.py/0
|
{
"file_path": "BioGPT/examples/RE-BC5CDR/rebuild_data.py",
"repo_id": "BioGPT",
"token_count": 1519
}
| 143 |
# Text Generation
You can use the pre-trained BioGPT model for free-text generation, just as you would use GPT models.
## Model Checkpoint
We provide our pre-trained BioGPT model. See [here](../../README.md#pre-trained-models)
## Generation
Here we provide an interactive way to generate text:
``` bash
python interactive.py
```
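For scripted (non-interactive) generation, a minimal sketch using fairseq's hub interface is shown below. It is a hedged example: the checkpoint, data, and BPE-code paths are assumptions and must be adjusted to match your local download layout.
``` python
import torch
from fairseq.models.transformer_lm import TransformerLanguageModel

# Paths are assumptions -- point them at the downloaded BioGPT checkpoint and data.
m = TransformerLanguageModel.from_pretrained(
    "checkpoints/Pre-trained-BioGPT",
    "checkpoint.pt",
    "data",
    tokenizer="moses",
    bpe="fastbpe",
    bpe_codes="data/bpecodes")

if torch.cuda.is_available():
    m.cuda()

src_tokens = m.encode("COVID-19 is")           # encode a prompt
generation = m.generate([src_tokens], beam=5)  # beam-search generation
print(m.decode(generation[0][0]["tokens"]))    # decode the best hypothesis
```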
|
BioGPT/examples/text-generation/README.md/0
|
{
"file_path": "BioGPT/examples/text-generation/README.md",
"repo_id": "BioGPT",
"token_count": 90
}
| 144 |
#!/usr/bin/env bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Usage:
# # Do work and commit your work.
# # Format files that differ from origin/main.
# bash format.sh
# # Commit changed files with message 'Run yapf and ruff'
#
#
# YAPF + Clang formatter (if installed). This script formats all changed files from the last mergebase.
# You are encouraged to run this locally before pushing changes for review.
# Cause the script to exit if a single command fails
set -eo pipefail
# this stops git rev-parse from failing if we run this from the .git directory
builtin cd "$(dirname "${BASH_SOURCE:-$0}")"
ROOT="$(git rev-parse --show-toplevel)"
builtin cd "$ROOT" || exit 1
YAPF_VERSION=$(yapf --version | awk '{print $2}')
RUFF_VERSION=$(ruff --version | awk '{print $2}')
CODESPELL_VERSION=$(codespell --version)
# # params: tool name, tool version, required version
tool_version_check() {
if [[ $2 != $3 ]]; then
echo "Wrong $1 version installed: $3 is required, not $2."
exit 1
fi
}
tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)"
echo 'bitblas yapf: Check Start'
YAPF_FLAGS=(
'--recursive'
'--parallel'
)
YAPF_EXCLUDES=(
'--exclude' 'build/**'
)
# Format specified files
format() {
yapf --in-place "${YAPF_FLAGS[@]}" "$@"
}
# Format files that differ from main branch. Ignores dirs that are not slated
# for autoformat yet.
format_changed() {
# The `if` guard ensures that the list of filenames is not empty, which
# could cause yapf to receive 0 positional arguments, making it hang
# waiting for STDIN.
#
# `diff-filter=ACM` and $MERGEBASE is to ensure we only format files that
# exist on both branches.
MERGEBASE="$(git merge-base origin/main HEAD)"
if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs -P 5 \
yapf --in-place "${YAPF_EXCLUDES[@]}" "${YAPF_FLAGS[@]}"
fi
}
# Format all files
format_all() {
yapf --in-place "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" .
}
## This flag formats individual files. --files *must* be the first command line
## arg to use this option.
if [[ "$1" == '--files' ]]; then
format "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is formatted.
elif [[ "$1" == '--all' ]]; then
format_all
else
# Format only the files that changed in last commit.
format_changed
fi
echo 'bitblas yapf: Done'
echo 'bitblas codespell: Check Start'
# check spelling of specified files
spell_check() {
codespell "$@"
}
spell_check_all(){
codespell --toml pyproject.toml
}
# Spelling check of files that differ from main branch.
spell_check_changed() {
# The `if` guard ensures that the list of filenames is not empty, which
# could cause ruff to receive 0 positional arguments, making it hang
# waiting for STDIN.
#
# `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that
# exist on both branches.
MERGEBASE="$(git merge-base origin/main HEAD)"
if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \
codespell
fi
}
# Run Codespell
## This flag runs spell check of individual files. --files *must* be the first command line
## arg to use this option.
if [[ "$1" == '--files' ]]; then
spell_check "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is linted.
elif [[ "$1" == '--all' ]]; then
spell_check_all
else
# Check spelling only of the files that changed in last commit.
spell_check_changed
fi
echo 'bitblas codespell: Done'
echo 'bitblas ruff: Check Start'
# Lint specified files
lint() {
ruff "$@"
}
# Lint files that differ from main branch. Ignores dirs that are not slated
# for autolint yet.
lint_changed() {
# The `if` guard ensures that the list of filenames is not empty, which
# could cause ruff to receive 0 positional arguments, making it hang
# waiting for STDIN.
#
# `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that
# exist on both branches.
MERGEBASE="$(git merge-base origin/main HEAD)"
if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \
ruff
fi
}
# Run Ruff
### This flag lints individual files. --files *must* be the first command line
### arg to use this option.
if [[ "$1" == '--files' ]]; then
lint "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is linted.
elif [[ "$1" == '--all' ]]; then
lint BitBLAS tests
else
# Format only the files that changed in last commit.
lint_changed
fi
if ! git diff --quiet &>/dev/null; then
echo 'Reformatted files. Please review and stage the changes.'
echo 'Changes not staged for commit:'
echo
git --no-pager diff --name-only
exit 1
fi
echo 'bitblas ruff: Done'
echo 'bitblas: All checks passed'
|
BitBLAS/format.sh/0
|
{
"file_path": "BitBLAS/format.sh",
"repo_id": "BitBLAS",
"token_count": 2050
}
| 145 |
import torch
import numpy as np
import torch.nn.functional as F
from lm_eval.base import BaseLM
from datasets import load_dataset
def set_seed(seed):
np.random.seed(seed)
torch.random.manual_seed(seed)
def get_test_dataset(dataset_name, tokenizer, seqlen=2048):
if dataset_name == "wikitext2":
testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
testdata = "".join(testdata['text']).split('\n')
elif dataset_name == "c4":
testdata = load_dataset('allenai/c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation')['text']
else:
raise NotImplementedError
testdata = [item for item in testdata if item != ""]
tokenized_text = [tokenizer(item, add_special_tokens=False)['input_ids'] + [tokenizer.eos_token_id] for item in testdata]
data, doc = [], [tokenizer.bos_token_id]
for sen in tokenized_text:
if len(sen) > seqlen:
continue
if len(doc) + len(sen) > seqlen:
data.append(doc)
doc = [tokenizer.bos_token_id]
doc.extend(sen)
if len(doc) > 1 and len(doc) <= seqlen:
data.append(doc)
return data
class LMEvalAdaptor(BaseLM):
def __init__(self, model_name, model, tokenizer, batch_size=1, max_length=-1):
super().__init__()
assert isinstance(batch_size, int)
self.model_name = model_name
self.model = model
self.model.eval()
self.tokenizer = tokenizer
self.vocab_size = self.tokenizer.vocab_size
self._batch_size = batch_size
self._max_length = max_length
@property
def eot_token_id(self):
# we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
return self.tokenizer.eos_token_id
@property
def max_length(self):
if self._max_length != -1:
return self._max_length
if hasattr(self.model.config, "n_ctx"):
return self.model.config.n_ctx
elif hasattr(self.model.config, "max_position_embeddings"):
return self.model.config.max_position_embeddings
elif hasattr(self.model.config, "n_positions"):
return self.model.config.n_positions
elif "bloom" in self.model_name:
return 2048
elif "llama" in self.model_name:
return 2048 # TODO: did not check this
elif "mpt" in self.model_name:
return 2048
elif "falcon" in self.model_name:
return 2048
else:
print(self.model.config)
raise NotImplementedError
@property
def max_gen_toks(self):
return 256
@property
def batch_size(self):
return self._batch_size
@property
def device(self):
return "cuda"
def tok_encode(self, string: str, add_special_tokens=True):
return self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
def tok_decode(self, tokens):
return self.tokenizer.decode(tokens)
def loglikelihood(self, requests):
new_reqs = []
for context, continuation in requests:
context, continuation = context.strip(), continuation.strip()
if context == "":
# end of text as context
context_enc = [self.eot_token_id]
else:
context_enc = self.tok_encode(context, add_special_tokens=True)
continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
new_reqs.append(((context, continuation), context_enc, continuation_enc))
return self._loglikelihood_tokens(new_reqs)
def _model_call(self, inps):
"""
inps: a torch tensor of shape [batch, sequence]
the size of sequence may vary from call to call
returns: a torch tensor of shape [batch, sequence, vocab] with the
logits returned from the model
"""
with torch.no_grad():
out = self.model(inps)[0]
return out
def _model_generate(self, context, max_length, eos_token_id):
return self.model.generate(
context, max_length=max_length, eos_token_id=eos_token_id, do_sample=False
)
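# Illustrative usage (hedged sketch; the model name and wrapper arguments are assumptions, not part of this module):
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").cuda()
# data = get_test_dataset("wikitext2", tokenizer, seqlen=2048)    # list of token-id sequences, each <= seqlen
# adaptor = LMEvalAdaptor("opt", model, tokenizer, batch_size=1)  # wraps the model for lm_eval tasks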
|
BitBLAS/integration/BitNet/eval_utils.py/0
|
{
"file_path": "BitBLAS/integration/BitNet/eval_utils.py",
"repo_id": "BitBLAS",
"token_count": 1916
}
| 146 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import os
# installing tvm
install_tvm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "3rdparty", "tvm", "python")
if os.path.exists(install_tvm_path) and install_tvm_path not in sys.path:
os.environ["PYTHONPATH"] = install_tvm_path + ":" + os.environ.get("PYTHONPATH", "")
sys.path.insert(0, install_tvm_path)
develop_tvm_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "3rdparty", "tvm", "python")
if os.path.exists(develop_tvm_path) and develop_tvm_path not in sys.path:
os.environ["PYTHONPATH"] = develop_tvm_path + ":" + os.environ.get("PYTHONPATH", "")
sys.path.insert(0, develop_tvm_path)
from . import gpu # noqa: F401
from .base import (
TileDevice, # noqa: F401
fast_tune, # noqa: F401
ApplyDefaultSchedule, # noqa: F401
ApplyFastTuning, # noqa: F401
BlockInfo, # noqa: F401
IterInfo, # noqa: F401
ScheduleRule, # noqa: F401
normalize_prim_func, # noqa: F401
try_inline, # noqa: F401
try_inline_contiguous_spatial, # noqa: F401
)
from . import testing # noqa: F401
from .utils import auto_detect_nvidia_target # noqa: F401
from .ops.general_matmul import MatmulConfig, Matmul # noqa: F401
from .ops.matmul_dequantize import MatmulWeightOnlyDequantizeConfig, MatmulWeightOnlyDequantize # noqa: F401
from .module import Linear # noqa: F401
import logging
from tqdm import tqdm
class TqdmLoggingHandler(logging.Handler):
""" Custom logging handler that directs log output to tqdm progress bar to avoid interference. """
def __init__(self, level=logging.NOTSET):
""" Initialize the handler with an optional log level. """
super().__init__(level)
def emit(self, record):
""" Emit a log record. Messages are written to tqdm to ensure output in progress bars isn't corrupted. """
try:
msg = self.format(record)
tqdm.write(msg)
except Exception:
self.handleError(record)
def set_log_level(level):
""" Set the logging level for the module's logger.
Args:
level (str or int): Can be the string name of the level (e.g., 'INFO') or the actual level (e.g., logging.INFO).
"""
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(level)
def _init_logger():
""" Initialize the logger specific for this module with custom settings and a Tqdm-based handler. """
logger = logging.getLogger(__name__)
handler = TqdmLoggingHandler()
formatter = logging.Formatter(
fmt="%(asctime)s [BitBLAS:%(levelname)s]: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
set_log_level('WARNING')
_init_logger()
__version__ = "0.0.1.dev4"
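# Illustrative usage of the public API exposed above (hedged sketch; the MatmulConfig arguments are
# assumptions -- see ops.general_matmul for the authoritative parameter list):
# import bitblas
# bitblas.set_log_level("INFO")
# cfg = bitblas.MatmulConfig(M=1, N=1024, K=1024, A_dtype="float16", W_dtype="int4")
# matmul = bitblas.Matmul(config=cfg)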
|
BitBLAS/python/bitblas/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/__init__.py",
"repo_id": "BitBLAS",
"token_count": 1186
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Rasteration Plan For L2 Cache Locality"""
from typing import List
class Rasterization:
def __init__(self) -> None:
pass
def get_code(self) -> List[str]:
raise NotImplementedError()
class NoRasterization(Rasterization):
def __init__(self) -> None:
super().__init__()
def __repr__(self) -> str:
return "<NoRasterization>"
def get_code(self) -> List[str]:
return []
class Rasterization2DRow(Rasterization):
"""
    Rasterization by row: each row panel is panel_width wide
_________
_________|
|_________
__________|
"""
def __init__(self, panel_width=4) -> None:
super().__init__()
self.panel_width_ = panel_width
def __repr__(self) -> str:
return f"<Rasterization2DRow({self.panel_width_})>"
def get_code(self) -> List[str]:
raise NotImplementedError()
class Rasterization2DColumn(Rasterization):
"""
    Rasterization by column: each column panel is panel_width wide
_
| | | |
| | | |
|_| |_|
"""
def __init__(self, panel_width=4) -> None:
super().__init__()
self.panel_width_ = panel_width
def __repr__(self) -> str:
return f"<Rasterization2DColumn({self.panel_width_})>"
def get_device_function(self) -> str:
return """
__device__ __inline__ dim3 rasterization2DColumn(const int panel_width) {
const auto baseBlockIdx = blockIdx.x + gridDim.x *blockIdx.y;
const auto totalPanel = (gridDim.x * gridDim.y +panel_width * gridDim.x - 1) / (panel_width * gridDim.x);
const auto totalBlock = gridDim.x * gridDim.y;
const auto panelIdx = baseBlockIdx / (panel_width *gridDim.x);
const auto strideLd = panelIdx + 1 < totalPanel ?panel_width : (totalBlock - panelIdx * (panel_width *gridDim.x)) / gridDim.x;
const auto bx = (panelIdx & 1) ? gridDim.x -(baseBlockIdx - panelIdx * panel_width * gridDim.x) /strideLd - 1 : (baseBlockIdx - panelIdx * panel_width *gridDim.x) / strideLd;
const auto by = (baseBlockIdx - panelIdx * panel_width *gridDim.x) % strideLd + panelIdx * panel_width;
const auto bz = blockIdx.z;
dim3 blockIdx(bx, by, bz);
return blockIdx;
}
"""
def get_code(self, panel_width: int = None) -> List[str]:
if panel_width is None:
panel_width = self.panel_width_
return [
self.get_device_function(),
"const dim3 blockIdx = rasterization2DColumn({});\n".format(panel_width),
]
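# Illustrative usage (hedged sketch): the returned lines are intended to be spliced into a
# generated CUDA kernel, remapping blockIdx into a column-major panel order for better L2 reuse.
# raster = Rasterization2DColumn(panel_width=4)
# device_fn, call_site = raster.get_code()
# print(device_fn)   # __device__ helper definition
# print(call_site)   # 'const dim3 blockIdx = rasterization2DColumn(4);'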
|
BitBLAS/python/bitblas/base/roller/rasterization.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/rasterization.py",
"repo_id": "BitBLAS",
"token_count": 1112
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=invalid-name
"""Reduction rule for operators including softmax, layer norm, RMS norm, etc"""
from typing import List, Union
from functools import reduce
from tvm import tir
from tvm.target import Target
from ..base import normalize_prim_func, try_inline_contiguous_spatial
from ..base.analysis import get_root_block, get_reduction_blocks, BlockInfo
from .base import GPUScheduleRule
class GeneralReduction(GPUScheduleRule):
"""General Reduction rule for operators including softmax, layer norm, RMS norm, etc"""
def apply( # pylint: disable=too-many-locals
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Union[None, tir.Schedule, List[tir.Schedule]]:
if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
return None
if target.kind.name == "cuda":
len_tx = 256
unroll_depth = 256
else:
len_tx = 64
unroll_depth = 64
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
block_infos = try_inline_contiguous_spatial(sch, block_infos)
if block_infos is None or len(block_infos) == 0:
return None
dom_kind = block_infos[0].dom_kind()
num_leading_s = len(dom_kind) - len(dom_kind.lstrip("S"))
num_trailing_r = len(dom_kind) - len(dom_kind.rstrip("R"))
# Align the number of block iters of the last block.
num_last_block_iter = len(block_infos[-1].dom_kind())
if num_last_block_iter < len(dom_kind):
index_map = tir.IndexMap.from_func(
lambda *iters: (
[tir.const(0, iters[0].dtype)] * (len(dom_kind) - num_last_block_iter)
+ list(iters)
),
ndim=num_last_block_iter,
)
sch.transform_block_layout(block_infos[-1].block_rv, index_map)
try:
# TODO: fix num_leading_s = 0 case
assert num_trailing_r > 0
for block in block_infos[1:-1]:
assert block.dom_kind() == dom_kind
assert block_infos[-1].is_injective()
assert len(block_infos[-1].dom_kind()) <= len(dom_kind)
except AssertionError:
return None
loops = sch.get_loops(block_infos[-1].block_rv)
bx = sch.fuse(*loops[:num_leading_s])
r_loop, tx = sch.split(loops[-1], [None, len_tx])
sch.reorder(tx, r_loop)
sch.bind(bx, "blockIdx.x")
sch.bind(tx, "threadIdx.x")
sch.annotate(r_loop, ann_key="pragma_auto_unroll_max_step", ann_val=unroll_depth)
sch.annotate(r_loop, ann_key="pragma_unroll_explicit", ann_val=1)
for block in reversed(block_infos[:-1]):
block = block.block_rv
for i, _ in enumerate(sch.get(block).writes):
sch.set_scope(block, buffer_index=i, storage_scope="shared")
sch.compute_at(block, bx, preserve_unit_loops=True)
r_loop = sch.fuse(*sch.get_loops(block)[-num_trailing_r:])
r_loop, tx = sch.split(r_loop, [None, len_tx])
sch.reorder(tx, r_loop)
sch.bind(tx, "threadIdx.x")
sch.annotate(r_loop, ann_key="pragma_auto_unroll_max_step", ann_val=unroll_depth)
sch.annotate(r_loop, ann_key="pragma_unroll_explicit", ann_val=1)
        # TODO: This is a workaround to avoid unrolling the spatial loops, due to a bug in
        # the lower-thread-allreduce pass. It should be fixed in the future.
# sch.annotate(bx, ann_key="pragma_auto_unroll_max_step", ann_val=unroll_depth)
# sch.annotate(bx, ann_key="pragma_unroll_explicit", ann_val=1)
return sch
def sch_inner_reduction_with_config( # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements
self,
func: tir.PrimFunc,
config,
):
block_factors = config.block
thread_factors = config.thread
        reduce_thread_factors = config.reduce_thread
        # For the inter-thread reduction case, each thread must compute only one element
assert thread_factors == block_factors
# inline all the other blocks
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
schedule_block: tir.schedule.BlockRV = None
reduction_blocks: List[tir.schedule.BlockRV] = []
for block in block_infos:
s_loops: List[tir.schedule.LoopRV] = []
r_loops: List[tir.schedule.LoopRV] = []
o_loops: List[tir.schedule.LoopRV] = []
dom_kind = block.dom_kind()
block_rv = block.block_rv
if (
any(
[
sch.get(loop_rv).thread_binding is not None
for loop_rv in sch.get_loops(block_rv)
]
)
or len(sch.get_loops(block.block_rv)) == 0
):
continue
for loop, iter_type in zip(sch.get_loops(block_rv), dom_kind):
{"S": s_loops, "R": r_loops, "O": o_loops}[iter_type].append(loop)
if not s_loops:
s_loops.append(sch.add_unit_loop(block_rv))
if len(r_loops) > 0:
# always use the last reduction block for scheduling
schedule_block = block
reduction_blocks.append(block_rv)
# Align the number of block iters of the last block.
dom_kind = schedule_block.dom_kind()
num_leading_s = len(dom_kind) - len(dom_kind.lstrip("S"))
num_trailing_r = len(dom_kind) - len(dom_kind.rstrip("R"))
schedule_block = schedule_block.block_rv
loops = sch.get_loops(schedule_block)
s_loops = loops[:num_leading_s]
r_loops = loops[-num_trailing_r:]
block_axis = []
thread_axis = []
for s_loop, block_factor in zip(s_loops, block_factors):
block_loop, thread_loop = sch.split(s_loop, factors=[None, block_factor])
block_axis.append(block_loop)
thread_axis.append(thread_loop)
axis_order = block_axis + thread_axis
sch.reorder(*axis_order)
blck_fused = sch.fuse(*block_axis)
thrd_fused = sch.fuse(*thread_axis)
sch.bind(blck_fused, "blockIdx.x")
sch.bind(thrd_fused, "threadIdx.y")
reduce_outer_axis, reduce_inner_axis, reduce_inter_threads = [], [], []
for i in config.raxis_order:
loop = r_loops[i]
ro, ri = sch.split(loop, factors=[None, config.rstep[i]])
ri, thd = sch.split(ri, factors=[None, config.reduce_thread[i]])
reduce_inter_threads.append(thd)
reduce_outer_axis.append(ro)
reduce_inner_axis.append(ri)
axis_order = reduce_inter_threads + reduce_outer_axis + reduce_inner_axis
sch.reorder(*axis_order)
fused_reduce_inter_threads = sch.fuse(*reduce_inter_threads)
sch.bind(fused_reduce_inter_threads, "threadIdx.x")
def prod(iterable):
return reduce(lambda x, y: x * y, iterable, 1)
reg_tile = sch.cache_write(schedule_block, 0, "local")
# todo(lei): should add the shared_inputs/stride memory pad analysis at shared memory fusion stage.
for i, input_region in enumerate(sch.get(schedule_block).reads):
if input_region.buffer.name not in config.cached_tensors:
continue
# otherwise cooperative fetch in shared memory.
cache_shared = sch.cache_read(schedule_block, i, "shared")
sch.compute_at(cache_shared, reduce_outer_axis[-1])
dim_offset = (
len(reduce_inner_axis) + len(reduce_outer_axis) + 2
) # outer loops are: blck_fused, thrd_fused, vthread_axis, reduce_outer_axis
if input_region.buffer.name in config.vectorize:
vectorize = config.vectorize[input_region.buffer.name]
else:
vectorize = 1
loops = sch.get_loops(cache_shared)
if len(loops) == dim_offset:
# handle fetching only one element
loops.append(sch.add_unit_loop(schedule_block))
assert len(loops) > dim_offset
_, ty, tx, tv = sch.split(
sch.fuse(*loops[dim_offset:]),
factors=[
None,
int(prod(thread_factors)),
                    int(prod(reduce_thread_factors)),
vectorize,
],
)
sch.vectorize(tv)
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
sch.reverse_compute_at(reg_tile, thrd_fused)
# resolve compute_at
block_infos = try_inline_contiguous_spatial(sch, block_infos)
if block_infos is None or len(block_infos) == 0:
return None
return sch
def sch_outer_reduction_with_config( # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements
self,
func: tir.PrimFunc,
config,
):
block_factors = config.block
thread_factors = config.thread
step_factors = config.step
# inline all the other blocks
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
schedule_block: BlockInfo = None
for block in block_infos:
s_loops: List[tir.schedule.LoopRV] = []
r_loops: List[tir.schedule.LoopRV] = []
o_loops: List[tir.schedule.LoopRV] = []
dom_kind = block.dom_kind()
block_rv = block.block_rv
if (
any(
[
sch.get(loop_rv).thread_binding is not None
for loop_rv in sch.get_loops(block_rv)
]
)
or len(sch.get_loops(block.block_rv)) == 0
):
continue
for loop, iter_type in zip(sch.get_loops(block_rv), dom_kind):
{"S": s_loops, "R": r_loops, "O": o_loops}[iter_type].append(loop)
if not s_loops:
s_loops.append(sch.add_unit_loop(block_rv))
if len(r_loops) > 0:
# always use the last reduction block for scheduling
schedule_block = block
# Align the number of block iters of the last block.
dom_kind = schedule_block.dom_kind()
num_leading_s = len(dom_kind) - len(dom_kind.lstrip("S"))
num_trailing_r = len(dom_kind) - len(dom_kind.rstrip("R"))
num_last_block_iter = len(block_infos[-1].dom_kind())
if num_last_block_iter < len(dom_kind):
index_map = tir.IndexMap.from_func(
lambda *iters: (
[tir.const(0, iters[0].dtype)] * (len(dom_kind) - num_last_block_iter)
+ list(iters)
),
ndim=num_last_block_iter,
)
sch.transform_block_layout(block_infos[-1].block_rv, index_map)
schedule_block = schedule_block.block_rv
loops = sch.get_loops(schedule_block)
s_loops = loops[:num_leading_s]
r_loops = loops[-num_trailing_r:]
reg_tile = sch.cache_write(schedule_block, 0, "local")
block_axis = []
vthread_axis = []
thread_axis = []
inner_axis = []
for s_loop, block_factor, step_factor, thread_factor in zip(
s_loops, block_factors, step_factors, thread_factors
):
block_loop, inner_loop = sch.split(s_loop, factors=[None, block_factor])
vthread_loop, inner_loop = sch.split(
inner_loop, factors=[None, thread_factor * step_factor]
)
thread_loop, inner_loop = sch.split(inner_loop, factors=[None, step_factor])
block_axis.append(block_loop)
vthread_axis.append(vthread_loop)
thread_axis.append(thread_loop)
inner_axis.append(inner_loop)
reduce_outer_axis, reduce_inner_axis = [], []
for i in config.raxis_order:
loop = r_loops[i]
ro, ri = sch.split(loop, factors=[None, config.rstep[i]])
reduce_outer_axis.append(ro)
reduce_inner_axis.append(ri)
vthread_axis = list(reversed(vthread_axis)) # inner virtual thread first
axis_order = (
block_axis
+ vthread_axis
+ thread_axis
+ reduce_outer_axis
+ reduce_inner_axis
+ inner_axis
)
sch.reorder(*axis_order)
blck_fused = sch.fuse(*block_axis)
thrd_fused = sch.fuse(*thread_axis)
sch.bind(blck_fused, "blockIdx.x")
sch.bind(thrd_fused, "threadIdx.x")
if len(vthread_axis) > 3:
vthread_axis = vthread_axis[0:2] + [sch.fuse(*vthread_axis[2:])]
for i, ax in enumerate(vthread_axis):
sch.bind(ax, "vthread" + [".x", ".y", ".z"][i])
# todo(lei): should add the shared_inputs/stride memory pad analysis at shared memory fusion stage.
for i, input_region in enumerate(sch.get(schedule_block).reads):
if input_region.buffer.name not in config.cached_tensors:
continue
# otherwise cooperative fetch in shared memory.
cache_shared = sch.cache_read(schedule_block, i, "shared")
sch.compute_at(cache_shared, reduce_outer_axis[-1])
dim_offset = (
len(vthread_axis) + len(reduce_outer_axis) + 2
) # outer loops are: blck_fused, thrd_fused, vthread_axis, reduce_outer_axis
if input_region.buffer.name in config.vectorize:
vectorize = config.vectorize[input_region.buffer.name]
else:
vectorize = 1
loops = sch.get_loops(cache_shared)
if len(loops) == dim_offset:
# handle fetching only one element
loops.append(sch.add_unit_loop(schedule_block))
assert len(loops) > dim_offset
def prod(iterable):
return reduce(lambda x, y: x * y, iterable, 1)
_, tx, tv = sch.split(
sch.fuse(*loops[dim_offset:]), factors=[None, int(prod(thread_factors)), vectorize]
)
sch.vectorize(tv)
sch.bind(tx, "threadIdx.x")
sch.reverse_compute_at(reg_tile, thrd_fused)
sch.decompose_reduction(schedule_block, reduce_outer_axis[0])
# resolve compute_at
block_infos = try_inline_contiguous_spatial(sch, block_infos)
if block_infos is None or len(block_infos) == 0:
return None
return sch
    def sch_multiple_reductions_with_config(  # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements
self,
func: tir.PrimFunc,
config,
):
block_factors = config.block
thread_factors = config.thread
        reduce_thread_factors = config.reduce_thread
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
block_infos = try_inline_contiguous_spatial(sch, block_infos)
if block_infos is None or len(block_infos) == 0:
return None
def prod(iterable):
return reduce(lambda x, y: x * y, iterable, 1)
        len_tx = prod(thread_factors) * prod(reduce_thread_factors)
block_factor = prod(block_factors)
dom_kind = block_infos[0].dom_kind()
num_leading_s = len(dom_kind) - len(dom_kind.lstrip("S"))
num_trailing_r = len(dom_kind) - len(dom_kind.rstrip("R"))
# Align the number of block iters of the last block.
num_last_block_iter = len(block_infos[-1].dom_kind())
if num_last_block_iter < len(dom_kind):
index_map = tir.IndexMap.from_func(
lambda *iters: (
[tir.const(0, iters[0].dtype)] * (len(dom_kind) - num_last_block_iter)
+ list(iters)
),
ndim=num_last_block_iter,
)
sch.transform_block_layout(block_infos[-1].block_rv, index_map)
try:
# TODO: fix num_leading_s = 0 case
assert num_trailing_r > 0
for block in block_infos[1:-1]:
assert block.dom_kind() == dom_kind
assert block_infos[-1].is_injective()
assert len(block_infos[-1].dom_kind()) <= len(dom_kind)
except AssertionError:
return None
loops = sch.get_loops(block_infos[-1].block_rv)
bx, _ = sch.split(sch.fuse(*loops[:num_leading_s]), factors=[None, block_factor])
r_loop, tx = sch.split(loops[-1], [None, len_tx])
sch.reorder(tx, r_loop)
sch.bind(bx, "blockIdx.x")
sch.bind(tx, "threadIdx.x")
for block in reversed(block_infos[:-1]):
block = block.block_rv
for i, _ in enumerate(sch.get(block).writes):
sch.set_scope(block, buffer_index=i, storage_scope="shared")
sch.compute_at(block, bx, preserve_unit_loops=True)
r_loop = sch.fuse(*sch.get_loops(block)[-num_trailing_r:])
r_loop, tx = sch.split(r_loop, [None, len_tx])
sch.reorder(tx, r_loop)
sch.bind(tx, "threadIdx.x")
return sch
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> tir.Schedule:
# check the number of reduction blocks
sch = tir.Schedule(func)
root_block = get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
reduction_blocks = get_reduction_blocks(sch, blocks)
if len(reduction_blocks) > 1:
# schedule for multiple reduction blocks (e.g. softmax)
            return self.sch_multiple_reductions_with_config(func, config)
if any([t > 1 for t in config.reduce_thread]):
# todo(lei) should implement block reduction schedule
return self.sch_inner_reduction_with_config(func, config)
else:
return self.sch_outer_reduction_with_config(func, config)
|
BitBLAS/python/bitblas/gpu/general_reduction.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/general_reduction.py",
"repo_id": "BitBLAS",
"token_count": 9221
}
| 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pre-transformed tir expression of conv2d
import tvm
from tvm import te, tir
def conv2d_nhwc_ohwi(
n,
f,
h,
w,
c,
kh,
kw,
s,
d,
p,
in_dtype="float16",
accum_dtype="float16",
out_dtype="float16",
):
A = te.placeholder((n, h, w, c), name="input", dtype=in_dtype)
B = te.placeholder((f, kh, kw, c), name="weight", dtype=in_dtype)
pad_shape = (n, h + 2 * p, w + 2 * p, c)
pad_value = tir.const(0.0, A.dtype)
pad = te.compute(
pad_shape,
lambda n, h, w, c: te.if_then_else(
tir.all(
h >= p,
w >= p,
h < pad_shape[1] - p,
w < pad_shape[2] - p,
),
A[n, h - p, w - p, c],
pad_value,
),
name="pad",
)
kernel_h, kernel_w = kh, kw
stride_h, stride_w = s, s
dilation_h, dilation_w = d, d
out_h = (h + 2 * p - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1
out_w = (w + 2 * p - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1
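# Worked example (illustrative numbers only): with h = w = 56, p = 1, kh = kw = 3, s = 1 and
# d = 1 the effective kernel extent is d * (k - 1) + 1 = 3, so out_h = (56 + 2 - 3) // 1 + 1 = 56,
# i.e. "same" spatial resolution; with s = 2 the same shapes give out_h = 28.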
out_shape = (n, out_h, out_w, f)
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
c = te.reduce_axis((0, c), name="c")
C = te.compute(
out_shape,
lambda n, h, w, f: te.sum(
pad[n, h * stride_h + kh * tir.any(dilation_h), w * stride_w + kw * tir.any(dilation_w),
c,].astype(accum_dtype) * B[f, kh - 1 - tir.any(dilation_h), kw - 1 - tir.any(
dilation_w), c].astype(accum_dtype),
axis=[kh, kw, c],
),
name="C",
)
args = [A, B]
last_output = C
if accum_dtype != out_dtype:
D = te.compute(out_shape, lambda n, h, w, c: C[n, h, w, c].astype(out_dtype), name="D")
last_output = D
args.append(last_output)
func = te.create_prim_func(args)
return tvm.IRModule.from_expr(func)
def conv2d_nhwc_hwio(
n,
f,
h,
w,
c,
kh,
kw,
s,
d,
p,
in_dtype="float16",
accum_dtype="float16",
out_dtype="float16",
):
A = te.placeholder((n, h, w, c), name="input", dtype=in_dtype)
B = te.placeholder((kh, kw, c, f), name="weight", dtype=in_dtype)
pad_shape = (n, h + 2 * p, w + 2 * p, c)
pad_value = tir.const(0.0, A.dtype)
pad = te.compute(
pad_shape,
lambda n, h, w, c: te.if_then_else(
tir.all(
h >= p,
w >= p,
h < pad_shape[1] - p,
w < pad_shape[2] - p,
),
A[n, h - p, w - p, c],
pad_value,
),
name="pad",
)
kernel_h, kernel_w = kh, kw
stride_h, stride_w = s, s
dilation_h, dilation_w = d, d
out_h = (h + 2 * p - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1
out_w = (w + 2 * p - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1
out_shape = (n, out_h, out_w, f)
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
c = te.reduce_axis((0, c), name="c")
C = te.compute(
out_shape,
lambda n, h, w, f: te.sum(
pad[n, h * stride_h + kh * tir.any(dilation_h), w * stride_w + kw * tir.any(dilation_w),
c,].astype(accum_dtype) * B[kh - 1 - tir.any(dilation_h), kw - 1 - tir.any(
dilation_w), c, f].astype(accum_dtype),
axis=[kh, kw, c],
),
name="C",
)
args = [A, B]
last_output = C
if accum_dtype != out_dtype:
D = te.compute(out_shape, lambda n, h, w, c: C[n, h, w, c].astype(out_dtype), name="D")
last_output = D
args.append(last_output)
func = te.create_prim_func(args)
return tvm.IRModule.from_expr(func)
def select_implementation(
n,
f,
h,
w,
c,
kh,
kw,
s,
d,
p,
in_dtype="float16",
accum_dtype="float16",
out_dtype="float16",
input_layout="nhwc",
weight_layout="ohwi",
):
assert input_layout in ["nhwc", "nchw"]
if input_layout == "nhwc" and weight_layout == "ohwi":
return conv2d_nhwc_ohwi(
n,
f,
h,
w,
c,
kh,
kw,
s,
d,
p,
in_dtype,
accum_dtype,
out_dtype,
)
elif input_layout == "nhwc" and weight_layout == "hwio":
return conv2d_nhwc_hwio(
n,
f,
h,
w,
c,
kh,
kw,
s,
d,
p,
in_dtype,
accum_dtype,
out_dtype,
)
else:
raise ValueError("Unsupported input_layout: {} and weight_layout: {}".format(
input_layout, weight_layout))
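# Minimal usage sketch (illustrative only; the shapes below are arbitrary and not from this file):
# mod = select_implementation(n=1, f=64, h=56, w=56, c=64, kh=3, kw=3, s=1, d=1, p=1,
# input_layout="nhwc", weight_layout="ohwi")
# print(mod.script()) # inspect the generated TIR PrimFunc before scheduling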
|
BitBLAS/python/bitblas/ops/impl/convolution2d_impl.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/impl/convolution2d_impl.py",
"repo_id": "BitBLAS",
"token_count": 2829
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .annotate_decode_block import AnnotateDecodeInformation
from .weight_only_propagate import WeightOnlyLayoutPropagation
|
BitBLAS/python/bitblas/relax/transform/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/relax/transform/__init__.py",
"repo_id": "BitBLAS",
"token_count": 51
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from tvm.script import tir as T
import bitblas
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.base.roller.arch import CUDA
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
from bitblas.base.utils import apply_and_build
from bitblas.ops.impl.matmul_impl import matmul_nt, matmul_nt_dequantize_b
import numpy as np
def test_f16_f16_gemm():
ir_module = matmul_nt(1, 16384, 16384, "float16", "float16")
func = ir_module["main"]
target = tvm.target.Target("nvidia/nvidia-a100")
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(20)
cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)
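# Illustrative note: apply_and_build compiles each of the 20 candidate schedules (in parallel
# here) and profiles them on the target; cpresults holds one result per candidate config and
# best is the candidate with the lowest measured latency.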
print(
"[BitBLAS] The best latency of top 1 is {:.3f} ms".format(
cpresults[0].latency * 1e3
)
)
print(
"[BitBLAS] The best latency of top 20 is {:.3f} ms".format(best.latency * 1e3)
)
def test_f16_i4_gemm(M=1, N=16384, K=16384, bit=4, fast_decoding=True):
ir_module = matmul_nt_dequantize_b(
M,
N,
K,
"float16",
bit=bit,
storage_dtype="uint32",
with_scaling=True,
group_size=-1,
fast_decoding=fast_decoding,
)
func = ir_module["main"]
target = tvm.target.Target("nvidia/nvidia-a100")
arch = CUDA(target)
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(20)
cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)
assert best
test_f16_i4_gemm()
|
BitBLAS/testing/python/type_conversion/test_lop3_type_conversion.py/0
|
{
"file_path": "BitBLAS/testing/python/type_conversion/test_lop3_type_conversion.py",
"repo_id": "BitBLAS",
"token_count": 920
}
| 152 |
date ; hostname ; pwd
EXP_NODES=1
EXP_IS=384
EXP_PGB=16
EXP_PGEB=16
EXP_LR=4.5e-6
EXP_BS=256
EXP_ME=30
EXP_WS=0.1
EXP_WD=0.01
EXP_LMH=5
EXP_LMC=5
EXP_LP=BridgeTower_pt_base.ckpt
EXP_RGM=blip_randaug_wc
EXP_PGEBT=256
EXP_PGEBI=128
EXP_GWG=True
EXP_GAII=False
EXP_IC=1
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
PREFIX_NAME="ftfpt"
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_NODES, $EXP_IS, $EXP_PGB, $EXP_PGEB, $EXP_LR, $EXP_BS, $EXP_ME, $EXP_WS, $EXP_WD, $EXP_LMH, $EXP_LMC, $EXP_RGM
TIME=$(date "+%Y%m%d%H%M")
RUN_NAME=""$PREFIX_NAME"_"$EXP_IS"_"$EXP_PGB"_"$EXP_PGEB"_"$EXP_LR"_"$EXP_BS"_"$EXP_ME"_"$EXP_WS"_"$EXP_WD"_"$EXP_LMH"_"$EXP_LMC"_"$EXP_RGM"_"$TIME""
echo $RUN_NAME
python run.py with run_name=$RUN_NAME task_finetune_irtr_itm_itc_coco_clip_bert bt clip16 text_roberta $EXP_RGM num_gpus=8 num_nodes=$EXP_NODES load_path=~/BT/best_checkpoints/$EXP_LP image_size=$EXP_IS per_gpu_batchsize=$EXP_PGB per_gpu_eval_batchsize=$EXP_PGEB learning_rate=$EXP_LR batch_size=$EXP_BS max_epoch=$EXP_ME warmup_steps=$EXP_WS weight_decay=$EXP_WD lr_mult_head=$EXP_LMH lr_mult_cross_modal=$EXP_LMC per_gpu_eval_batchsize_text=$EXP_PGEBT per_gpu_eval_batchsize_image=$EXP_PGEBI gather_with_grads=$EXP_GWG gather_all_image_inputs=$EXP_GAII image_chunks=$EXP_IC
date
|
BridgeTower/scripts/ftfpt_base_irtr_itm_itc_coco.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_base_irtr_itm_itc_coco.sh",
"repo_id": "BridgeTower",
"token_count": 654
}
| 153 |
from sacred import Experiment
ex = Experiment("VL")
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"itc": 0,
"itm_itc": 0,
"irtr_itm_itc": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"snli": 0,
}
ret.update(d)
return ret
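# Illustrative note: _loss_names only overrides the task weights you pass in and leaves every
# other head at 0, e.g. _loss_names({"itm": 1, "mlm": 1}) enables ITM and MLM while keeping itc,
# vqa, nlvr2, irtr and snli disabled.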
@ex.config
def config():
# the params below vary with the environment
root_dir = "~/BT"
data_root = f"{root_dir}/dataset/fine-tune"
log_dir = f"{root_dir}/logs"
output_dir = f"{root_dir}/checkpoints"
load_path = ""
num_gpus = 8
num_nodes = 1
num_workers = 8
precision = 32
per_gpu_batchsize = 0 # you should define this manually with per_gpu_batchsize=#
per_gpu_eval_batchsize = 0
# Wandb Logger Setting
exp_name = "BT"
group_name = "exp/task"
run_name = "finetune"
# PL Trainer Setting
resume_from = None
fast_dev_run = False
val_check_interval = 1.0
test_only = False
log_every_n_steps = 50
# Experiment Setting
seed = 0
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1})
batch_size = 4096 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
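# Illustrative note: the effective accumulation is roughly
# batch_size // (per_gpu_batchsize * num_gpus * num_nodes); e.g. with the assumed values
# per_gpu_batchsize=16, num_gpus=8, num_nodes=2 this gives 4096 // 256 = 16 accumulation steps.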
# Image setting
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 224
patch_size = 32
draw_false_image = 0
image_only = False
resolution_before = 224
# Text Setting
vqav2_label_size = 3129
max_text_len = 40
tokenizer = "bert-base-uncased"
vocab_size = 30522
whole_word_masking = False # note that whole_word_masking does not work for RoBERTa
mlm_prob = 0.15
draw_false_text = 0
# Transformer Setting
input_image_embed_size = 768
input_text_embed_size = 768
vit = 'CLIP-ViT-B/32'
hidden_size = 768
num_heads = 12
num_layers = 6
mlp_ratio = 4
drop_rate = 0.1
# Optimizer Setting
optim_type = "adamw"
learning_rate = 1e-5
weight_decay = 0.01
decay_power = 1
max_epoch = 10
max_steps = -1
warmup_steps = 10000
end_lr = 0
lr_mult_head = 5 # multiply lr for downstream heads
lr_mult_cross_modal = 5 # multiply lr for the cross-modal module
# Downstream Setting
get_recall_metric = False
# Debug
debug_num = 0
# METER Setting
meter_fusion = False
vit_remove_last = False
# BT Setting
model_type = "BT" # "METER", "BT"
vit_layernorm_shared = True
vit_layernorm_init_from_vit = False
task_head_layers = 2 # 1, 2
head_hidden_scale = 1 # 1, 2, 3, 4
per_gpu_eval_batchsize_text = 256
per_gpu_eval_batchsize_image = 128
per_gpu_eval_batchsize_fusion_text = 500
k_test = 128 # 128, 256
amp_flag = True
task_threshold = 0 # the task will be executed if it > task_threshold
nlvr2_drop_rate = 0.1
## contrastive setting
temperature = 0.07
contrastive_hidden_size = 256
gather_with_grads = True
gather_global_negative = False
gather_all_image_inputs = False # if all image features cannot be gathered on one GPU, gather all image inputs instead
image_chunks = 1 # if k_test x image needs too much memory, split the images into chunks to calculate rerank scores
text_chunks = 1 # if k_test x text needs too much memory, split the texts into chunks to calculate rerank scores
save_memory = False
# model type
@ex.named_config
def meter():
model_type = "METER"
@ex.named_config
def bt():
model_type = "BT"
@ex.named_config
def bt_large():
hidden_size = 1024
num_heads = 16
num_layers = 6
# pre-train task setting
@ex.named_config
def task_mlm_itm_clip_bert():
group_name = "mlm_itm"
run_name = "pre-train"
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1})
batch_size = 4096
max_epoch = 10
max_steps = 100000
warmup_steps = 0.1
whole_word_masking = True
vocab_size = 30522
max_text_len = 50
image_size = 224
tokenizer = "bert-base-uncased"
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
learning_rate = 1e-5
lr_mult_head = 5
lr_mult_cross_modal = 5
draw_false_image = 1
@ex.named_config
def task_mlm_itm_itc():
group_name = "mlm_itm_itc"
loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
contrastive_hidden_size = 256 # 256, 512, 768
@ex.named_config
def task_mlm_itm_itc_hard():
group_name = "mlm_itm_itc_hard"
loss_names = _loss_names({"itm_itc": 1, "mlm": 1})
draw_false_image = 0
contrastive_hidden_size = 256 # 256, 512, 768
# fine-tune task setting
@ex.named_config
def task_finetune_vqa_clip_bert():
group_name = "vqa"
run_name = "finetune"
datasets = ["vqa"]
loss_names = _loss_names({"vqa": 1})
batch_size = 512
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
learning_rate = 1e-5
lr_mult_head = 50
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 50
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 576
@ex.named_config
def task_finetune_snli_clip_bert():
group_name = "snli"
run_name = "finetune"
datasets = ["snli"]
loss_names = _loss_names({"snli": 1})
batch_size = 64
max_epoch = 5
max_steps = -1
warmup_steps = 0.1
learning_rate = 2e-6
lr_mult_head = 10
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 50
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
@ex.named_config
def task_finetune_irtr_f30k_clip_bert():
group_name = "irtr_f30k"
run_name = "finetune"
datasets = ["f30k"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 512
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
draw_false_image = 1
draw_false_text = 15
learning_rate = 5e-6
lr_mult_head = 5
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 40
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
@ex.named_config
def task_finetune_irtr_itm_itc_f30k_clip_bert():
group_name = "irtr_itm_itc_f30k"
run_name = "finetune"
datasets = ["f30k"]
loss_names = _loss_names({"irtr_itm_itc": 1})
batch_size = 512
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
draw_false_image = 0
draw_false_text = 0
learning_rate = 5e-6
lr_mult_head = 5
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 40
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
k_test = 128
get_recall_metric = True
@ex.named_config
def task_finetune_nlvr2_clip_bert():
group_name = "nlvr2"
run_name = "finetune"
datasets = ["nlvr2"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 256
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
learning_rate = 1e-5
lr_mult_head = 10
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 50
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
@ex.named_config
def task_finetune_irtr_coco_clip_bert():
group_name = "irtr_coco"
run_name = "finetune"
datasets = ["coco"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 512
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
draw_false_image = 1
draw_false_text = 15
learning_rate = 5e-6
lr_mult_head = 5
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 40
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
@ex.named_config
def task_finetune_irtr_itm_itc_coco_clip_bert():
group_name = "irtr_itm_itc_coco"
run_name = "finetune"
datasets = ["coco"]
loss_names = _loss_names({"irtr_itm_itc": 1})
batch_size = 512
max_epoch = 10
max_steps = -1
warmup_steps = 0.1
draw_false_image = 0
draw_false_text = 0
learning_rate = 5e-6
lr_mult_head = 5
lr_mult_cross_modal = 5
tokenizer = "bert-base-uncased"
max_text_len = 40
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
image_size = 384
k_test = 256
get_recall_metric = True
# Named configs for "etc" which are orthogonal to "env" and "task", need to be added at the end
# vision encoder
@ex.named_config
def vit16_224():
vit = 'vit_base_patch16_224'
image_size = 224
patch_size = 16
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def vit16_384():
# used by METER
vit = 'vit_base_patch16_384'
image_size = 224
patch_size = 32
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def vit32_384():
# used by ViLT
vit = 'vit_base_patch32_384'
image_size = 384
patch_size = 32
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def vit16_224_in21k():
vit = 'vit_base_patch16_224_in21k'
image_size = 224
patch_size = 16
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def vit32_224_in21k():
vit = 'vit_base_patch32_224_in21k'
image_size = 224
patch_size = 32
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def deit16_224():
# used by ALBEF
vit = 'vit_deit_base_patch16_224'
image_size = 224
patch_size = 16
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def deit16_384():
# used by METER
vit = 'vit_deit_base_patch16_384'
image_size = 384
patch_size = 16
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
## BT does not support the swin transformer
@ex.named_config
def swin32_base224():
vit = "swin_base_patch4_window7_224_in22k"
patch_size = 32
image_size = 224
train_transform_keys = ["imagenet"]
val_transform_keys = ["imagenet"]
input_image_embed_size = 1024
resolution_before = 224
@ex.named_config
def swin32_base384():
vit = "swin_base_patch4_window12_384_in22k"
patch_size = 32
image_size = 384
train_transform_keys = ["imagenet"]
val_transform_keys = ["imagenet"]
input_image_embed_size = 1024
resolution_before = 384
@ex.named_config
def swin32_large384():
vit = "swin_large_patch4_window12_384_in22k"
patch_size = 32
image_size = 384
train_transform_keys = ["imagenet"]
val_transform_keys = ["imagenet"]
input_image_embed_size = 1536
resolution_before = 384
@ex.named_config
def clip32():
vit = 'CLIP-ViT-B/32'
image_size = 224
patch_size = 32
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def clip16():
vit = 'CLIP-ViT-B/16'
image_size = 224
patch_size = 16
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 768
@ex.named_config
def clip14_large():
vit = 'CLIP-ViT-L/14'
image_size = 224
patch_size = 14
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
input_image_embed_size = 1024
# text encoder
@ex.named_config
def text_roberta():
tokenizer = "roberta-base"
vocab_size = 50265
input_text_embed_size = 768
whole_word_masking = False
@ex.named_config
def text_roberta_large():
tokenizer = "roberta-large"
vocab_size = 50265
input_text_embed_size = 1024
whole_word_masking = False
# random augmentation
@ex.named_config
def imagenet_randaug():
train_transform_keys = ["imagenet_randaug"]
@ex.named_config
def clip_randaug():
train_transform_keys = ["clip_randaug"]
@ex.named_config
def clip_pure():
train_transform_keys = ["clip"]
val_transform_keys = ["clip"]
@ex.named_config
def blip_pure():
train_transform_keys = ["blip"]
val_transform_keys = ["blip"]
@ex.named_config
def blip_randaug():
train_transform_keys = ["blip_randaug"]
val_transform_keys = ["blip"]
@ex.named_config
def blip_randaug_wc():
train_transform_keys = ["blip_randaug_wc"]
val_transform_keys = ["blip"]
@ex.named_config
def blip_randaug_wohf():
train_transform_keys = ["blip_randaug_wohf"]
val_transform_keys = ["blip"]
@ex.named_config
def blip_randaug_pretrain():
train_transform_keys = ["blip_randaug_pretrain"]
val_transform_keys = ["blip"]
|
BridgeTower/src/config.py/0
|
{
"file_path": "BridgeTower/src/config.py",
"repo_id": "BridgeTower",
"token_count": 5581
}
| 154 |
from .base_dataset import BaseDataset
class F30KCaptionKarpathyDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "train":
names = ["f30k_caption_karpathy_train", "f30k_caption_karpathy_val"]
elif split == "val":
# names = ["f30k_caption_karpathy_val"]
names = ["f30k_caption_karpathy_test"] # ViLT, METER
elif split == "test":
names = ["f30k_caption_karpathy_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
|
BridgeTower/src/datasets/f30k_caption_karpathy_dataset.py/0
|
{
"file_path": "BridgeTower/src/datasets/f30k_caption_karpathy_dataset.py",
"repo_id": "BridgeTower",
"token_count": 316
}
| 155 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import json
from tqdm import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
import torch.distributed as dist
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
# pre-train
def compute_mlm(pl_module, batch, split):
infer = pl_module.infer(batch, mask_text=True, mask_image=False)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
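# Illustrative note: the MLM collator sets text_labels to -100 everywhere except at masked
# positions, so with ignore_index=-100 the loss above is averaged over masked tokens only.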
ret = {
"mlm_loss": mlm_loss,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
loss_name = 'mlm'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["mlm_logits"], ret["mlm_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
return ret
def compute_itm(pl_module, batch, split):
pos_len = len(batch["text"]) // 2
neg_len = len(batch["text"]) - pos_len
itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
pl_module.device
)
itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
itm_images = [
torch.stack(
[
ti if itm_labels[i] == 1 else fi
for i, (ti, fi) in enumerate(zip(bti, bfi))
]
)
for bti, bfi in zip(batch["image"], batch["false_image_0"])
]
batch = {k: v for k, v in batch.items()}
batch["image"] = itm_images
infer = pl_module.infer(batch)
itm_logits = pl_module.itm_score(infer["cls_feats"])
itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
ret = {
"itm_loss": itm_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
loss_name = 'itm'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["itm_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
return ret
def compute_itc(pl_module, batch, split):
assert batch["image"][0].size(0) == len(batch["text"])
bs, rank = len(batch["text"]), torch.distributed.get_rank()
with torch.no_grad():
pl_module.temperature.clamp_(0.001, 0.5)
infer = pl_module.get_uni_modal_features(batch, itc=True)
unimodal_feats_text = infer['unimodal_feats_text']
unimodal_feats_image = infer['unimodal_feats_image']
if pl_module.hparams.config["gather_with_grads"]:
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text, sync_grads=True)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image, sync_grads=True)
else:
with torch.no_grad():
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image)
gather_unimodal_feats_text[rank] = unimodal_feats_text
gather_unimodal_feats_image[rank] = unimodal_feats_image
gather_unimodal_feats_text = gather_unimodal_feats_text.view((-1,) + (gather_unimodal_feats_text.shape)[2:])
gather_unimodal_feats_image = gather_unimodal_feats_image.view((-1,) + (gather_unimodal_feats_image.shape)[2:])
logit_scale = torch.log(1 / pl_module.temperature).exp()
itc_logits_i2t = logit_scale * unimodal_feats_image @ gather_unimodal_feats_text.t()
itc_logits_t2i = logit_scale * unimodal_feats_text @ gather_unimodal_feats_image.t()
itc_labels = torch.arange(bs).to(pl_module.device)
itc_labels = itc_labels + bs * rank
i2t_loss = F.cross_entropy(itc_logits_i2t, itc_labels)
t2i_loss = F.cross_entropy(itc_logits_t2i, itc_labels)
itc_loss = (i2t_loss + t2i_loss) / 2
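# Illustrative note: this is a symmetric InfoNCE loss where every gathered caption/image from all
# GPUs acts as a negative; the positive for local sample i sits at column bs * rank + i of the
# gathered similarity matrix, hence the bs * rank offset on the labels. Note also that
# logit_scale = exp(log(1 / temperature)) is numerically just 1 / temperature.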
ret = {
"itc_loss": itc_loss,
}
loss_name = 'itc'
if pl_module.hparams.config["num_layers"] == 0:
loss_name = 'irtr_itm'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["itc_loss"])
pl_module.log(f"{split}/{loss_name}/loss", loss)
return ret
def compute_itm_itc(pl_module, batch, split, pretrain=False):
# REMEMBER: No need to draw false images for image text matching in data preprocessing.
assert batch["image"][0].size(0) == len(batch["text"])
bs, rank = len(batch["text"]), torch.distributed.get_rank()
# forward the positive image-text pair
with torch.no_grad():
pl_module.temperature.clamp_(0.001, 0.5)
infer = pl_module.get_uni_modal_features(batch, fusion_features=True, itc=True)
unimodal_feats_text = infer['unimodal_feats_text']
unimodal_feats_image = infer['unimodal_feats_image']
if pl_module.hparams.config["gather_with_grads"]:
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text, sync_grads=True)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image, sync_grads=True)
else:
with torch.no_grad():
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image)
gather_unimodal_feats_text[rank] = unimodal_feats_text
gather_unimodal_feats_image[rank] = unimodal_feats_image
gather_unimodal_feats_text = gather_unimodal_feats_text.view((-1,) + (gather_unimodal_feats_text.shape)[2:])
gather_unimodal_feats_image = gather_unimodal_feats_image.view((-1,) + (gather_unimodal_feats_image.shape)[2:])
logit_scale = torch.log(1 / pl_module.temperature).exp()
itc_logits_i2t = logit_scale * unimodal_feats_image @ gather_unimodal_feats_text.t()
itc_logits_t2i = logit_scale * unimodal_feats_text @ gather_unimodal_feats_image.t()
if pretrain:
itc_labels = torch.arange(bs).to(pl_module.device)
itc_labels = itc_labels + bs * rank
else:
idx = torch.LongTensor(batch["img_index"]).view(-1, 1).to(pl_module.device)
idx_all = pl_module.all_gather(idx).view(-1, 1)
assert idx_all.size(0) == gather_unimodal_feats_image.size(0)
idx_all = torch.eq(idx_all, idx_all.t()).to(pl_module.device)
idx_all = idx_all[bs * rank:bs * (rank+1)]
pos_idx = idx_all.float()
assert pos_idx.size(0) == len(idx)
itc_labels = pos_idx / pos_idx.sum(1, keepdim=True)
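# Illustrative note: during retrieval fine-tuning several captions can share one image id, so the
# target here is a normalized multi-hot distribution over all gathered samples with the same
# img_index rather than a single one-hot class.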
i2t_loss = F.cross_entropy(itc_logits_i2t, itc_labels)
t2i_loss = F.cross_entropy(itc_logits_t2i, itc_labels)
itc_loss = (i2t_loss + t2i_loss) / 2
if pretrain:
loss_name = 'itc'
else:
loss_name = 'irtr_itc'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(itc_loss)
pl_module.log(f"{split}/{loss_name}/loss", loss)
# sample hard negative images for image text matching from image text contrastive logits
if pl_module.hparams.config["gather_global_negative"]:
# select a negative image for each text
with torch.no_grad():
weights_i2t = F.softmax(itc_logits_i2t, dim=-1)
weights_t2i = F.softmax(itc_logits_t2i, dim=-1)
if pretrain:
weights_i2t[:, bs * rank:bs * (rank+1)].fill_diagonal_(0)
weights_t2i[:, bs * rank:bs * (rank+1)].fill_diagonal_(0)
else:
weights_i2t.masked_fill_(idx_all, 0)
weights_t2i.masked_fill_(idx_all, 0)
global_image_embedss = pl_module.all_gather(infer['image_embedss'].transpose(0, 1), sync_grads=True).view(-1, infer['image_embedss'].size(0), infer['image_embedss'].size(2), infer['image_embedss'].size(3))
image_embeds_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_t2i[b] + 1e-5, 1).item()
image_embeds_neg.append(global_image_embedss[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=1)
# del global_image_embedss
# select a negative text for each image
global_text_embedss = pl_module.all_gather(infer['text_embedss'].transpose(0, 1), sync_grads=True).view(-1, infer['text_embedss'].size(0), infer['text_embedss'].size(2), infer['text_embedss'].size(3))
global_text_masks = pl_module.all_gather(infer['text_masks']).view(-1, infer['text_masks'].size(1))
text_embeds_neg = []
text_masks_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_i2t[b] + 1e-5, 1).item()
text_embeds_neg.append(global_text_embedss[neg_idx])
text_masks_neg.append(global_text_masks[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=1)
text_masks_neg = torch.stack(text_masks_neg, dim=0)
# del global_text_embedss, global_text_masks
else:
# select a negative image for each text
with torch.no_grad():
weights_i2t = F.softmax(itc_logits_i2t[:, bs * rank:bs * (rank+1)], dim=-1)
weights_t2i = F.softmax(itc_logits_t2i[:, bs * rank:bs * (rank+1)], dim=-1)
if pretrain:
weights_i2t.fill_diagonal_(0)
weights_t2i.fill_diagonal_(0)
else:
mask = torch.eq(idx, idx.t()).to(pl_module.device)
weights_i2t.masked_fill_(mask, 0)
weights_t2i.masked_fill_(mask, 0)
image_embeds_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_t2i[b] + 1e-5, 1).item()
image_embeds_neg.append(infer['image_embedss'][:, neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=1)
# select a negative text for each image
text_embeds_neg = []
text_masks_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_i2t[b] + 1e-5, 1).item()
text_embeds_neg.append(infer['text_embedss'][:, neg_idx])
text_masks_neg.append(infer["text_masks"][neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=1)
text_masks_neg = torch.stack(text_masks_neg, dim=0)
# pack the negative image-text pairs for fusion, which is 2 x batch_size
text_embedss = torch.cat([infer['text_embedss'], text_embeds_neg], dim=1)
text_masks = torch.cat([infer["text_masks"], text_masks_neg], dim=0)
extend_text_masks = pl_module.text_transformer.get_extended_attention_mask(text_masks, text_masks.size(), pl_module.device)
image_embedss = torch.cat([image_embeds_neg, infer['image_embedss']], dim=1)
# fusion
pos_cls_feats = pl_module.infer_fusion(infer['image_embedss'], infer['text_embedss'], infer['extend_text_masks'])['cls_feats']
neg_cls_feats = pl_module.infer_fusion(image_embedss, text_embedss, extend_text_masks)["cls_feats"]
cls_feats = torch.cat([pos_cls_feats, neg_cls_feats], dim=0)
itm_labels = torch.cat([
torch.ones(bs, dtype=torch.long),
torch.zeros(2 * bs, dtype=torch.long)]
).to(pl_module.device)
itm_logits = pl_module.itm_score(cls_feats)
itm_loss = F.cross_entropy(itm_logits, itm_labels)
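# Illustrative note: the matcher sees 3 * bs pairs per step: bs positives, bs (hard-negative
# image, text) pairs and bs (image, hard-negative text) pairs, which matches the label vector of
# bs ones followed by 2 * bs zeros above.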
ret = {
"itc_loss": itc_loss,
"itm_loss": itm_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
if pretrain:
loss_name = 'itm'
else:
loss_name = 'irtr_itm'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["itm_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
return ret
def compute_itm_itc_meter(pl_module, batch, split, pretrain=False):
# REMEMBER: No need to draw false images for image text matching in data preprocessing.
assert batch["image"][0].size(0) == len(batch["text"])
bs, rank = len(batch["text"]), torch.distributed.get_rank()
# forward the positive image-text pair
with torch.no_grad():
pl_module.temperature.clamp_(0.001, 0.5)
infer = pl_module.get_uni_modal_features(batch, fusion_features=True, itc=True)
unimodal_feats_text = infer['unimodal_feats_text']
unimodal_feats_image = infer['unimodal_feats_image']
if pl_module.hparams.config["gather_with_grads"]:
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text, sync_grads=True)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image, sync_grads=True)
else:
with torch.no_grad():
gather_unimodal_feats_text = pl_module.all_gather(unimodal_feats_text)
gather_unimodal_feats_image = pl_module.all_gather(unimodal_feats_image)
gather_unimodal_feats_text[rank] = unimodal_feats_text
gather_unimodal_feats_image[rank] = unimodal_feats_image
gather_unimodal_feats_text = gather_unimodal_feats_text.view((-1,) + (gather_unimodal_feats_text.shape)[2:])
gather_unimodal_feats_image = gather_unimodal_feats_image.view((-1,) + (gather_unimodal_feats_image.shape)[2:])
logit_scale = torch.log(1 / pl_module.temperature).exp()
itc_logits_i2t = logit_scale * unimodal_feats_image @ gather_unimodal_feats_text.t()
itc_logits_t2i = logit_scale * unimodal_feats_text @ gather_unimodal_feats_image.t()
if pretrain:
itc_labels = torch.arange(bs).to(pl_module.device)
itc_labels = itc_labels + bs * rank
else:
idx = torch.LongTensor(batch["img_index"]).view(-1, 1).to(pl_module.device)
idx_all = pl_module.all_gather(idx).view(-1, 1)
assert idx_all.size(0) == gather_unimodal_feats_image.size(0)
idx_all = torch.eq(idx_all, idx_all.t()).to(pl_module.device)
idx_all = idx_all[bs * rank:bs * (rank+1)]
pos_idx = idx_all.float()
assert pos_idx.size(0) == len(idx)
itc_labels = pos_idx / pos_idx.sum(1, keepdim=True)
i2t_loss = F.cross_entropy(itc_logits_i2t, itc_labels)
t2i_loss = F.cross_entropy(itc_logits_t2i, itc_labels)
itc_loss = (i2t_loss + t2i_loss) / 2
if pretrain:
loss_name = 'itc'
else:
loss_name = 'irtr_itc'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(itc_loss)
pl_module.log(f"{split}/{loss_name}/loss", loss)
# sample hard negative images for image text matching from image text contrastive logits
# select a negative image for each text
with torch.no_grad():
weights_i2t = F.softmax(itc_logits_i2t[:, bs * rank:bs * (rank+1)], dim=-1)
weights_t2i = F.softmax(itc_logits_t2i[:, bs * rank:bs * (rank+1)], dim=-1)
if pretrain:
weights_i2t.fill_diagonal_(0)
weights_t2i.fill_diagonal_(0)
else:
mask = torch.eq(idx, idx.t()).to(pl_module.device)
weights_i2t.masked_fill_(mask, 0)
weights_t2i.masked_fill_(mask, 0)
image_embeds_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_t2i[b] + 1e-5, 1).item()
image_embeds_neg.append(infer['image_embeds'][neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_embeds_neg = []
text_masks_neg = []
for b in range(bs):
try:
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
except Exception:
neg_idx = torch.multinomial(weights_i2t[b] + 1e-5, 1).item()
text_embeds_neg.append(infer['text_embeds'][neg_idx])
text_masks_neg.append(infer["text_masks"][neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=0)
text_masks_neg = torch.stack(text_masks_neg, dim=0)
# pack the negative image-text pairs for fusion, which is 2 x batch_size
text_embeds = torch.cat([infer['text_embeds'], text_embeds_neg], dim=0)
text_masks = torch.cat([infer["text_masks"], text_masks_neg], dim=0)
extend_text_masks = pl_module.text_transformer.get_extended_attention_mask(text_masks, text_masks.size(), pl_module.device)
image_embeds = torch.cat([image_embeds_neg, infer['image_embeds']], dim=0)
# fusion
pos_cls_feats = pl_module.infer_fusion(infer['image_embeds'], infer['text_embeds'], infer['extend_text_masks'])['cls_feats']
neg_cls_feats = pl_module.infer_fusion(image_embeds, text_embeds, extend_text_masks)["cls_feats"]
cls_feats = torch.cat([pos_cls_feats, neg_cls_feats], dim=0)
itm_labels = torch.cat([
torch.ones(bs, dtype=torch.long),
torch.zeros(2 * bs, dtype=torch.long)]
).to(pl_module.device)
itm_logits = pl_module.itm_score(cls_feats)
itm_loss = F.cross_entropy(itm_logits, itm_labels)
ret = {
"itc_loss": itc_loss,
"itm_loss": itm_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
if pretrain:
loss_name = 'itm'
else:
loss_name = 'irtr_itm'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["itm_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
return ret
# fine-tune
def compute_snli(pl_module, batch, split):
infer = pl_module.infer(batch)
snli_logits = pl_module.snli_classifier(infer["cls_feats"])
snli_labels = batch["labels"]
snli_labels = torch.tensor(snli_labels).to(pl_module.device).long()
snli_loss = F.cross_entropy(snli_logits, snli_labels.view(-1))
ret = {
"snli_loss": snli_loss,
"snli_logits": snli_logits,
"snli_labels": snli_labels,
}
loss_name = 'snli'
if split == "train":
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["snli_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["snli_logits"], ret["snli_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
else:
val_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if val_batches:
val_loss = getattr(pl_module, f"val_{loss_name}_loss")(
F.cross_entropy(
ret["snli_logits"][val_batches], ret["snli_labels"][val_batches]
)
)
val_acc = getattr(pl_module, f"val_{loss_name}_accuracy")(
ret["snli_logits"][val_batches], ret["snli_labels"][val_batches]
)
pl_module.log(f"val/snli/loss", val_loss)
pl_module.log(f"val/snli/accuracy", val_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_{loss_name}_loss")(
F.cross_entropy(
ret["snli_logits"][test_batches], ret["snli_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_{loss_name}_accuracy")(
ret["snli_logits"][test_batches], ret["snli_labels"][test_batches]
)
pl_module.log(f"test/snli/loss", test_loss)
pl_module.log(f"test/snli/accuracy", test_acc)
return ret
def compute_vqa(pl_module, batch, split):
infer = pl_module.infer(batch)
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
vqa_targets = torch.zeros(
len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
).to(pl_module.device)
vqa_labels = batch["vqa_labels"]
vqa_scores = batch["vqa_scores"]
for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(_label, _score):
vqa_targets[i, l] = s
vqa_loss = (
F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
* vqa_targets.shape[1]
) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_targets": vqa_targets,
"vqa_labels": vqa_labels,
"vqa_scores": vqa_scores,
}
loss_name = 'vqa'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["vqa_loss"])
score = getattr(pl_module, f"{split}_{loss_name}_score")(
ret["vqa_logits"], ret["vqa_targets"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/score", score)
return ret
def compute_nlvr2(pl_module, batch, split):
infer1 = pl_module.infer(batch, image_token_type_idx=1)
infer2 = pl_module.infer(batch, image_token_type_idx=2)
cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
if pl_module.hparams.config["nlvr2_drop_rate"] > 0:
cls_feats = pl_module.nlvr2_classifier_dropout(cls_feats)
nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
nlvr2_labels = batch["answers"]
nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels.view(-1))
ret = {
"nlvr2_loss": nlvr2_loss,
"nlvr2_logits": nlvr2_logits,
"nlvr2_labels": nlvr2_labels,
}
loss_name = 'nlvr2'
if split == "train":
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["nlvr2_loss"])
acc = getattr(pl_module, f"{split}_{loss_name}_accuracy")(
ret["nlvr2_logits"], ret["nlvr2_labels"]
)
pl_module.log(f"{split}/{loss_name}/loss", loss)
pl_module.log(f"{split}/{loss_name}/accuracy", acc)
else:
val_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if val_batches:
val_loss = getattr(pl_module, f"val_{loss_name}_loss")(
F.cross_entropy(
ret["nlvr2_logits"][val_batches], ret["nlvr2_labels"][val_batches]
)
)
val_acc = getattr(pl_module, f"val_{loss_name}_accuracy")(
ret["nlvr2_logits"][val_batches], ret["nlvr2_labels"][val_batches]
)
pl_module.log(f"val/nlvr2/loss", val_loss)
pl_module.log(f"val/nlvr2/accuracy", val_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_{loss_name}_loss")(
F.cross_entropy(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_{loss_name}_accuracy")(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
pl_module.log(f"test/nlvr2/loss", test_loss)
pl_module.log(f"test/nlvr2/accuracy", test_acc)
return ret
def compute_irtr(pl_module, batch, split):
_bs, _c, _h, _w = batch["image"][0].shape
false_len = pl_module.hparams.config["draw_false_text"]
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
text_ids = torch.stack([batch["text_ids"], text_ids], dim=1)
text_masks = torch.stack([batch["text_masks"], text_masks], dim=1)
text_labels = torch.stack([batch["text_labels"], text_labels], dim=1)
infer = pl_module.infer(
{
"image": batch["image"],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
},
irtr_len_text=false_len+1,
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
answer = torch.zeros(_bs).to(score).long()
irtr_loss = F.cross_entropy(score, answer)
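# Illustrative note: each image is scored against its true caption plus draw_false_text sampled
# negatives, so the loss is a (false_len + 1)-way classification whose correct class is always
# index 0, because the true caption is placed first when the text tensors are concatenated above.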
ret = {
"irtr_loss": irtr_loss,
}
loss_name = 'irtr'
loss = getattr(pl_module, f"{split}_{loss_name}_loss")(ret["irtr_loss"])
pl_module.log(f"{split}/{loss_name}/loss", loss)
return ret
## calculate recall for irtr task
@torch.no_grad()
def compute_irtr_recall(pl_module, split):
print("[Evaluation] load irtr dataset for text features caching")
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split)
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_dist_sampler = DistributedSampler(text_dset, shuffle=False)
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_text"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=text_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] load irtr dataset for image features caching")
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split, image_only=True)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_image"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=image_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] start to cache the text features")
text_embedss_cache, extend_text_masks_cache, tiids = list(), list(), list()
for _b in tqdm(text_loader, desc="text prefetch loop"):
text_embedss, extend_text_masks = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
)
text_embedss_cache.append(text_embedss)
extend_text_masks_cache.append(extend_text_masks)
tiids += _b["img_index"]
text_embedss_cache = torch.cat(text_embedss_cache, dim=1)
extend_text_masks_cache = torch.cat(extend_text_masks_cache, dim=0)
tiids = torch.LongTensor(tiids)
# gather all text features
if torch.distributed.is_initialized():
torch.distributed.barrier()
text_embedss_cache = pl_module.all_gather(text_embedss_cache.transpose(0, 1)).to(pl_module.device).view(-1, text_embedss_cache.size(0), text_embedss_cache.size(2), text_embedss_cache.size(3)).transpose(0, 1)
extend_text_masks_cache = pl_module.all_gather(extend_text_masks_cache).to(pl_module.device).view(-1, extend_text_masks_cache.size(1), extend_text_masks_cache.size(2), extend_text_masks_cache.size(3))
tiids = pl_module.all_gather(tiids).to(pl_module.device).view(-1)
print("[Evaluation] start to cache the image features")
image_embedss_cache, iids_cache = list(), list()
for _b in tqdm(image_loader, desc="image prefetch loop"):
image_embedss = pl_module.infer_image(img=_b["image"][0].to(pl_module.device))
image_embedss_cache.append(image_embedss)
iids_cache += _b["img_index"]
image_embedss_cache = torch.cat(image_embedss_cache, dim=1)
image_index, rank_scores, rank_iids = 0, list(), list()
text_chunk_size = pl_module.hparams.config["per_gpu_eval_batchsize_fusion_text"]
if text_embedss_cache.size(1) % text_chunk_size == 0:
text_chunk_num = text_embedss_cache.size(1) // text_chunk_size
else:
text_chunk_num = text_embedss_cache.size(1) // text_chunk_size + 1
print("[Evaluation] start to compute the irtr recall")
for _iid in tqdm(iids_cache, desc="rank loop"):
image_embedss = image_embedss_cache[:, image_index]
image_index += 1
img_batch_score = list()
for _i in range(text_chunk_num):
text_embedss = text_embedss_cache[:, _i*text_chunk_size:(_i+1)*text_chunk_size]
extend_text_masks = extend_text_masks_cache[_i*text_chunk_size:(_i+1)*text_chunk_size]
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.rank_output(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_image=text_embedss.size(1),
)["cls_feats"]
)[:, 0]
else:
score = pl_module.rank_output(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_image=text_embedss.size(1),
)["cls_feats"]
)[:, 0]
img_batch_score.append(score)
img_batch_score = torch.cat(img_batch_score)
rank_scores.append(img_batch_score)
rank_iids.append(_iid)
rank_iids = torch.LongTensor(rank_iids)
rank_scores = torch.cat(rank_scores, dim=0)
if torch.distributed.is_initialized():
torch.distributed.barrier()
iids = pl_module.all_gather(rank_iids).to(pl_module.device).view(-1)
scores = pl_module.all_gather(rank_scores).to(pl_module.device).view(len(iids), -1)
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
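# Illustrative note: scores is (num_images, num_texts); the dim=1 topk above ranks candidate
# captions per image, so tr_r@k is the fraction of images whose ground-truth caption appears in
# the top k, while the dim=0 topk below yields the image-retrieval recalls ir_r@k.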
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10, ir_mean, tr_mean, r_mean)
@torch.no_grad()
def compute_irtr_itm_itc_recall(pl_module, split):
print("[Evaluation] load irtr dataset for text features caching")
torch.cuda.empty_cache()
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split)
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_dist_sampler = DistributedSampler(text_dset, shuffle=False)
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_text"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=text_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] load irtr dataset for image features caching")
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split, image_only=True)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_image"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=image_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] start to cache the text features")
text_embedss_cache, extend_text_masks_cache, unimodal_feats_text_cache, tiids = list(), list(), list(), list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(text_loader, desc="text prefetch loop"):
text_embedss, extend_text_masks, unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)
text_embedss_cache.append(text_embedss)
unimodal_feats_text_cache.append(unimodal_feats_text)
extend_text_masks_cache.append(extend_text_masks)
tiids += _b["img_index"]
else:
for _b in tqdm(text_loader, desc="text prefetch loop"):
text_embedss, extend_text_masks, unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)
text_embedss_cache.append(text_embedss)
unimodal_feats_text_cache.append(unimodal_feats_text)
extend_text_masks_cache.append(extend_text_masks)
tiids += _b["img_index"]
text_embedss_cache = torch.cat(text_embedss_cache, dim=1)
unimodal_feats_text_cache = torch.cat(unimodal_feats_text_cache, dim=0)
extend_text_masks_cache = torch.cat(extend_text_masks_cache, dim=0)
tiids = torch.LongTensor(tiids)
print("[Evaluation] gather all texts")
if torch.distributed.is_initialized():
torch.distributed.barrier()
text_embedss_cache = pl_module.all_gather(text_embedss_cache.transpose(0, 1)).to(pl_module.device).view(-1, text_embedss_cache.size(0), text_embedss_cache.size(2), text_embedss_cache.size(3)).transpose(0, 1)
unimodal_feats_text_cache = pl_module.all_gather(unimodal_feats_text_cache).view(-1, unimodal_feats_text_cache.size(1)).to(pl_module.device)
extend_text_masks_cache = pl_module.all_gather(extend_text_masks_cache).to(pl_module.device).view(-1, extend_text_masks_cache.size(1), extend_text_masks_cache.size(2), extend_text_masks_cache.size(3))
tiids = pl_module.all_gather(tiids).to(pl_module.device).view(-1)
print("[Evaluation] start to cache the image features")
image_embedss_cache, unimodal_feats_image_cache, iids_cache = list(), list(), list()
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache = list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
image_embedss, unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)
image_embedss_cache.append(image_embedss)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache.append(img_input)
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
else:
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
image_embedss, unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)
image_embedss_cache.append(image_embedss)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache.append(img_input)
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
image_embedss_cache = torch.cat(image_embedss_cache, dim=1)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache = torch.cat(img_input_cache, dim=0)
unimodal_feats_image_cache = torch.cat(unimodal_feats_image_cache, dim=0)
# top-k contrastive scores
print("[Evaluation] start to compute the irtr recall")
print("[Evaluation] start image-to-text")
sims_matrix = unimodal_feats_image_cache @ unimodal_feats_text_cache.t()
_, topk_idx = sims_matrix.topk(k=pl_module.hparams.config['k_test'], dim=1)
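# Illustrative note: retrieval is evaluated in two stages here: the cheap ITC dot products select
# the k_test most similar captions per image, and only those candidates are pushed through the
# full cross-modal fusion + ITM head to produce the final ranking scores.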
torch.cuda.empty_cache()
image_index, rank_scores, rank_iids = 0, list(), list()
for _iid in tqdm(iids_cache, desc="image-to-text rank loop"):
topk_idx_i = topk_idx[image_index]
image_embedss = image_embedss_cache[:, image_index]
text_embedss = text_embedss_cache[:, topk_idx_i]
extend_text_masks = extend_text_masks_cache[topk_idx_i]
if pl_module.hparams.config["image_chunks"] >= 2:
text_embedss = torch.chunk(text_embedss, pl_module.hparams.config["text_chunks"], dim=1)
extend_text_masks = torch.chunk(extend_text_masks, pl_module.hparams.config["text_chunks"], dim=0)
score_list, img_batch_score = [], None
for text_embedss_, extend_text_masks_ in zip(text_embedss, extend_text_masks):
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss_,
extend_text_masks_,
irtr_len_image=text_embedss_.size(1),
)["cls_feats"]
)[:, 1]
if img_batch_score is None:
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
score_list.append(score)
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss_,
extend_text_masks_,
irtr_len_image=text_embedss_.size(1),
)["cls_feats"]
)[:, 1]
if img_batch_score is None:
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
score_list.append(score)
img_batch_score[topk_idx_i] = torch.cat(score_list, dim=0)
else:
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_image=text_embedss.size(1),
)["cls_feats"]
)[:, 1]
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
img_batch_score[topk_idx_i] = score
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_image=text_embedss.size(1),
)["cls_feats"]
)[:, 1]
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
img_batch_score[topk_idx_i] = score
rank_scores.append(img_batch_score)
rank_iids.append(_iid)
image_index += 1
rank_iids = torch.LongTensor(rank_iids)
rank_scores = torch.cat(rank_scores, dim=0)
print("[Evaluation] start text-to-image")
unimodal_feats_image_cache = pl_module.all_gather(unimodal_feats_image_cache).to(pl_module.device).view(-1, unimodal_feats_image_cache.size(1))
sims_matrix = unimodal_feats_image_cache @ unimodal_feats_text_cache.t()
_, topk_idx = sims_matrix.topk(k=pl_module.hparams.config['k_test'], dim=0)
rank = torch.distributed.get_rank()
del unimodal_feats_image_cache, unimodal_feats_text_cache
import gc
gc.collect()
torch.cuda.empty_cache()
print("[Evaluation] gather all images")
# if we run out of memory, gather all the image inputs and rerun the vision part instead, which is 4~5 times slower
if text_embedss_cache.size(1) % torch.distributed.get_world_size() == 0:
step = text_embedss_cache.size(1) // torch.distributed.get_world_size()
else:
step = text_embedss_cache.size(1) // torch.distributed.get_world_size() + 1
start = rank * step
end = min(text_embedss_cache.size(1), (rank + 1) * step)
text_embedss_cache = text_embedss_cache[:, start:end]
extend_text_masks_cache = extend_text_masks_cache[start:end]
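# Illustrative note: for the text-to-image pass each rank keeps only its own [start, end) slice
# of the cached texts, so with world_size ranks every GPU re-ranks roughly
# num_texts / world_size text queries against the gathered image features.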
# topk_idx = topk_idx[:, start:end]
if pl_module.hparams.config["gather_all_image_inputs"]:
if not pl_module.hparams.config["save_memory"]:
img_input_cache = pl_module.all_gather(img_input_cache).to(pl_module.device).view(-1, img_input_cache.size(1), img_input_cache.size(2), img_input_cache.size(3))
else:
useful_num = topk_idx.tolist()
print(len(useful_num), len(useful_num[0]))
useful_num = [item for sublist in useful_num for item in sublist]
useful_num = set(useful_num)
print(len(useful_num))
all_idx_matrix = torch.zeros(sims_matrix.size(0)).long().to(pl_module.device)
for i in range(topk_idx.size(1)):
all_idx_matrix[topk_idx[:, i]] = 1
image_input_list, image_input_idx_list = [], []
current_image_num = sims_matrix.size(0) // dist.get_world_size()
for i in range(current_image_num):
j = i + current_image_num * rank
if all_idx_matrix[j] == 1:
image_input_list.append(img_input_cache[i])
image_input_idx_list.append(j)
image_input_list = torch.stack(image_input_list, dim=0)
image_input_idx_list = torch.LongTensor(image_input_idx_list)
img_input_cache = image_input_list
gather_img_input_cache = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(gather_img_input_cache, img_input_cache)
gather_img_input_cache = [i.to(pl_module.device) for i in gather_img_input_cache]
gather_img_input_cache = torch.cat(gather_img_input_cache, dim=0)
img_input_cache = gather_img_input_cache
gather_image_input_idx_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(gather_image_input_idx_list, image_input_idx_list)
gather_image_input_idx_list = [i.to(pl_module.device) for i in gather_image_input_idx_list]
gather_image_input_idx_list = torch.cat(gather_image_input_idx_list, dim=0)
image_input_idx_list = gather_image_input_idx_list
print(img_input_cache.shape, image_input_idx_list.shape)
inverse_img_input_idx = torch.zeros(sims_matrix.size(0)).long().fill_(-1).to(pl_module.device)
for i in range(image_input_idx_list.size(0)):
inverse_img_input_idx[image_input_idx_list[i]] = i
else:
if not pl_module.hparams.config["save_memory"]:
image_embedss_cache = pl_module.all_gather(image_embedss_cache.transpose(0, 1)).to(pl_module.device).view(-1, image_embedss_cache.size(0), image_embedss_cache.size(2), image_embedss_cache.size(3)).transpose(0, 1)
else:
useful_num = topk_idx.tolist()
print(len(useful_num), len(useful_num[0]))
useful_num = [item for sublist in useful_num for item in sublist]
useful_num = set(useful_num)
print(len(useful_num))
all_idx_matrix = torch.zeros(sims_matrix.size(0)).long().to(pl_module.device)
for i in range(topk_idx.size(1)):
all_idx_matrix[topk_idx[:, i]] = 1
# current_idx_matrix = torch.zeros(sims_matrix.size(0))
# for i in range(end-start):
# current_idx_matrix[topk_idx[:, i]] = 1
image_embedss_cache = image_embedss_cache.transpose(0, 1)
image_embedss_list, image_embedss_idx_list = [], []
current_image_num = sims_matrix.size(0) // dist.get_world_size()
for i in range(current_image_num):
j = i + current_image_num * rank
if all_idx_matrix[j] == 1:
image_embedss_list.append(image_embedss_cache[i])
image_embedss_idx_list.append(j)
image_embedss_list = torch.stack(image_embedss_list, dim=0)
image_embedss_idx_list = torch.LongTensor(image_embedss_idx_list)
image_embedss_cache = image_embedss_list
gather_image_embedss_cache = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(gather_image_embedss_cache, image_embedss_cache)
gather_image_embedss_cache = [i.to(pl_module.device) for i in gather_image_embedss_cache]
gather_image_embedss_cache = torch.cat(gather_image_embedss_cache, dim=0)
image_embedss_cache = gather_image_embedss_cache
gather_image_embedss_idx_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(gather_image_embedss_idx_list, image_embedss_idx_list)
gather_image_embedss_idx_list = [i.to(pl_module.device) for i in gather_image_embedss_idx_list]
gather_image_embedss_idx_list = torch.cat(gather_image_embedss_idx_list, dim=0)
image_embedss_idx_list = gather_image_embedss_idx_list
print(image_embedss_cache.shape, image_embedss_idx_list.shape)
image_embedss_cache = image_embedss_cache.transpose(0, 1)
inverse_image_embedss_idx = torch.zeros(sims_matrix.size(0)).long().fill_(-1).to(pl_module.device)
for i in range(image_embedss_idx_list.size(0)):
inverse_image_embedss_idx[image_embedss_idx_list[i]] = i
topk_idx = topk_idx[:, start:end]
txt_rank_scores = list()
for text_index in tqdm(range(end-start), desc="text-to-image rank loop"):
topk_idx_i = topk_idx[:, text_index]
if pl_module.hparams.config["gather_all_image_inputs"]:
if pl_module.hparams.config["save_memory"]:
img_input = img_input_cache[inverse_img_input_idx[topk_idx_i]]
else:
img_input = img_input_cache[topk_idx_i]
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
image_embedss = pl_module.infer_image(img=img_input)
else:
image_embedss = pl_module.infer_image(img=img_input)
else:
if pl_module.hparams.config["save_memory"]:
image_embedss = image_embedss_cache[:, inverse_image_embedss_idx[topk_idx_i]]
else:
image_embedss = image_embedss_cache[:, topk_idx_i]
text_embedss = text_embedss_cache[:, text_index]
extend_text_masks = extend_text_masks_cache[text_index].unsqueeze_(0).expand(image_embedss.size(1), extend_text_masks_cache.size(1), extend_text_masks_cache.size(2), extend_text_masks_cache.size(3))
if pl_module.hparams.config["image_chunks"] >= 2:
image_embedss = torch.chunk(image_embedss, pl_module.hparams.config["image_chunks"], dim=1)
extend_text_masks = torch.chunk(extend_text_masks, pl_module.hparams.config["image_chunks"], dim=0)
score_list, txt_batch_score = [], None
for image_embedss_, extend_text_masks_ in zip(image_embedss, extend_text_masks):
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss_,
text_embedss,
extend_text_masks_,
irtr_len_text=image_embedss_.size(1),
)["cls_feats"]
)[:, 1]
if txt_batch_score is None:
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
score_list.append(score)
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss_,
text_embedss,
extend_text_masks_,
irtr_len_text=image_embedss_.size(1),
)["cls_feats"]
)[:, 1]
if txt_batch_score is None:
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
score_list.append(score)
txt_batch_score[topk_idx_i] = torch.cat(score_list, dim=0)
else:
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_text=image_embedss.size(1),
)["cls_feats"]
)[:, 1]
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
txt_batch_score[topk_idx_i] = score
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embedss,
text_embedss,
extend_text_masks,
irtr_len_text=image_embedss.size(1),
)["cls_feats"]
)[:, 1]
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
txt_batch_score[topk_idx_i] = score
txt_rank_scores.append(txt_batch_score)
txt_rank_scores = torch.cat(txt_rank_scores, dim=0)
if torch.distributed.is_initialized():
torch.distributed.barrier()
iids = pl_module.all_gather(rank_iids).to(pl_module.device).view(-1)
img_scores = pl_module.all_gather(rank_scores).to(pl_module.device).view(len(iids), -1)
txt_scores = pl_module.all_gather(txt_rank_scores).to(pl_module.device).view(-1, len(iids)).t()
scores = torch.stack((img_scores, txt_scores), dim=-1)
scores = torch.max(scores, dim=-1)[0]
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
del text_embedss_cache, extend_text_masks_cache, image_embedss_cache
if pl_module.hparams.config["gather_all_image_inputs"]:
del img_input_cache
import gc
gc.collect()
torch.cuda.empty_cache()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10, ir_mean, tr_mean, r_mean)
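# --- Illustrative sketch (assumption; not part of the original evaluation code, never called) ---
# The recall@K bookkeeping above is dense, so the toy helper below spells out the
# same idea on tiny tensors: for text retrieval, each image row keeps its top-1 text
# column and we check whether that text's image id (tiids) matches the row's own id.
def _toy_recall_at_1_example():
    import torch
    # 3 images x 4 candidate texts; tiids maps each text column to the image it describes.
    scores = torch.tensor([[0.9, 0.1, 0.2, 0.1],
                           [0.2, 0.8, 0.1, 0.1],
                           [0.1, 0.1, 0.1, 0.7]])
    iids = torch.tensor([0, 1, 2])       # image id of each row
    tiids = torch.tensor([0, 1, 5, 2])   # image id each text column refers to
    topk1 = scores.topk(1, dim=1)
    topk1_iids = tiids[topk1.indices]    # shape (3, 1)
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    return tr_r1                         # tensor(1.) for this toy case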
@torch.no_grad()
def compute_irtr_itm_itc_recall_meter(pl_module, split):
print("[Evaluation] load irtr dataset for text features caching")
torch.cuda.empty_cache()
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split)
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_dist_sampler = DistributedSampler(text_dset, shuffle=False)
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_text"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=text_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] load irtr dataset for image features caching")
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split, image_only=True)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_image"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=image_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] start to cache the text features")
text_embeds_cache, extend_text_masks_cache, unimodal_feats_text_cache, tiids = list(), list(), list(), list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(text_loader, desc="text prefetch loop"):
text_embeds, extend_text_masks, unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)
text_embeds_cache.append(text_embeds)
unimodal_feats_text_cache.append(unimodal_feats_text)
extend_text_masks_cache.append(extend_text_masks)
tiids += _b["img_index"]
else:
for _b in tqdm(text_loader, desc="text prefetch loop"):
text_embeds, extend_text_masks, unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)
text_embeds_cache.append(text_embeds)
unimodal_feats_text_cache.append(unimodal_feats_text)
extend_text_masks_cache.append(extend_text_masks)
tiids += _b["img_index"]
text_embeds_cache = torch.cat(text_embeds_cache, dim=0)
unimodal_feats_text_cache = torch.cat(unimodal_feats_text_cache, dim=0)
extend_text_masks_cache = torch.cat(extend_text_masks_cache, dim=0)
tiids = torch.LongTensor(tiids)
print("[Evaluation] gather all texts")
if torch.distributed.is_initialized():
torch.distributed.barrier()
text_embeds_cache = pl_module.all_gather(text_embeds_cache).to(pl_module.device).view(-1, text_embeds_cache.size(1), text_embeds_cache.size(2))
unimodal_feats_text_cache = pl_module.all_gather(unimodal_feats_text_cache).view(-1, unimodal_feats_text_cache.size(1)).to(pl_module.device)
extend_text_masks_cache = pl_module.all_gather(extend_text_masks_cache).to(pl_module.device).view(-1, extend_text_masks_cache.size(1), extend_text_masks_cache.size(2), extend_text_masks_cache.size(3))
tiids = pl_module.all_gather(tiids).to(pl_module.device).view(-1)
print("[Evaluation] start to cache the image features")
image_embeds_cache, unimodal_feats_image_cache, iids_cache = list(), list(), list()
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache = list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
image_embeds, unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)
image_embeds_cache.append(image_embeds)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache.append(img_input)
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
else:
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
image_embeds, unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)
image_embeds_cache.append(image_embeds)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache.append(img_input)
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
image_embeds_cache = torch.cat(image_embeds_cache, dim=0)
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache = torch.cat(img_input_cache, dim=0)
unimodal_feats_image_cache = torch.cat(unimodal_feats_image_cache, dim=0)
# top-k contrastive scores
print("[Evaluation] start to compute the irtr recall")
print("[Evaluation] start image-to-text")
sims_matrix = unimodal_feats_image_cache @ unimodal_feats_text_cache.t()
_, topk_idx = sims_matrix.topk(k=pl_module.hparams.config['k_test'], dim=1)
torch.cuda.empty_cache()
image_index, rank_scores, rank_iids = 0, list(), list()
for _iid in tqdm(iids_cache, desc="image-to-text rank loop"):
topk_idx_i = topk_idx[image_index]
image_embeds = image_embeds_cache[image_index]
text_embeds = text_embeds_cache[topk_idx_i]
extend_text_masks = extend_text_masks_cache[topk_idx_i]
if pl_module.hparams.config["image_chunks"] >= 2:
text_embeds = torch.chunk(text_embeds, pl_module.hparams.config["text_chunks"], dim=0)
extend_text_masks = torch.chunk(extend_text_masks, pl_module.hparams.config["text_chunks"], dim=0)
score_list, img_batch_score = [], None
for text_embeds_, extend_text_masks_ in zip(text_embeds, extend_text_masks):
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds_,
extend_text_masks_,
irtr_len_image=text_embeds_.size(0),
)["cls_feats"]
)[:, 1]
if img_batch_score is None:
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
score_list.append(score)
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds_,
extend_text_masks_,
irtr_len_image=text_embeds_.size(0),
)["cls_feats"]
)[:, 1]
if img_batch_score is None:
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
score_list.append(score)
img_batch_score[topk_idx_i] = torch.cat(score_list, dim=0)
else:
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds,
extend_text_masks,
irtr_len_image=text_embeds.size(0),
)["cls_feats"]
)[:, 1]
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
img_batch_score[topk_idx_i] = score
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds,
extend_text_masks,
irtr_len_image=text_embeds.size(0),
)["cls_feats"]
)[:, 1]
img_batch_score = torch.full((sims_matrix.size(1),), -100.0, dtype=score.dtype, device=pl_module.device)
img_batch_score[topk_idx_i] = score
rank_scores.append(img_batch_score)
rank_iids.append(_iid)
image_index += 1
rank_iids = torch.LongTensor(rank_iids)
rank_scores = torch.cat(rank_scores, dim=0)
print("[Evaluation] start text-to-image")
unimodal_feats_image_cache = pl_module.all_gather(unimodal_feats_image_cache).to(pl_module.device).view(-1, unimodal_feats_image_cache.size(1))
sims_matrix = unimodal_feats_image_cache @ unimodal_feats_text_cache.t()
_, topk_idx = sims_matrix.topk(k=pl_module.hparams.config['k_test'], dim=0)
rank = torch.distributed.get_rank()
del unimodal_feats_image_cache, unimodal_feats_text_cache
import gc
gc.collect()
torch.cuda.empty_cache()
print("[Evaluation] gather all images")
    # if we run out of memory here, gather all the image inputs and rerun the vision encoder instead (roughly 4-5x slower)
if text_embeds_cache.size(0) % torch.distributed.get_world_size() == 0:
step = text_embeds_cache.size(0) // torch.distributed.get_world_size()
else:
step = text_embeds_cache.size(0) // torch.distributed.get_world_size() + 1
start = rank * step
end = min(text_embeds_cache.size(0), (rank + 1) * step)
text_embeds_cache = text_embeds_cache[start:end]
extend_text_masks_cache = extend_text_masks_cache[start:end]
topk_idx = topk_idx[:, start:end]
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input_cache = pl_module.all_gather(img_input_cache).to(pl_module.device).view(-1, img_input_cache.size(1), img_input_cache.size(2), img_input_cache.size(3))
else:
image_embeds_cache = pl_module.all_gather(image_embeds_cache).to(pl_module.device).view(-1, image_embeds_cache.size(1), image_embeds_cache.size(2))
txt_rank_scores = list()
for text_index in tqdm(range(end-start), desc="text-to-image rank loop"):
topk_idx_i = topk_idx[:, text_index]
if pl_module.hparams.config["gather_all_image_inputs"]:
img_input = img_input_cache[topk_idx_i]
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
image_embeds = pl_module.infer_image(img=img_input)
else:
image_embeds = pl_module.infer_image(img=img_input)
else:
image_embeds = image_embeds_cache[topk_idx_i]
text_embeds = text_embeds_cache[text_index]
extend_text_masks = extend_text_masks_cache[text_index].unsqueeze_(0).expand(image_embeds.size(0), extend_text_masks_cache.size(1), extend_text_masks_cache.size(2), extend_text_masks_cache.size(3))
if pl_module.hparams.config["image_chunks"] >= 2:
image_embeds = torch.chunk(image_embeds, pl_module.hparams.config["image_chunks"], dim=0)
extend_text_masks = torch.chunk(extend_text_masks, pl_module.hparams.config["image_chunks"], dim=0)
score_list, txt_batch_score = [], None
for image_embeds_, extend_text_masks_ in zip(image_embeds, extend_text_masks):
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds_,
text_embeds,
extend_text_masks_,
irtr_len_text=image_embeds_.size(0),
)["cls_feats"]
)[:, 1]
if txt_batch_score is None:
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
score_list.append(score)
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds_,
text_embeds,
extend_text_masks_,
irtr_len_text=image_embeds_.size(0),
)["cls_feats"]
)[:, 1]
if txt_batch_score is None:
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
score_list.append(score)
txt_batch_score[topk_idx_i] = torch.cat(score_list, dim=0)
else:
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds,
extend_text_masks,
irtr_len_text=image_embeds.size(0),
)["cls_feats"]
)[:, 1]
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
txt_batch_score[topk_idx_i] = score
else:
score = pl_module.itm_score(
pl_module.infer_fusion(
image_embeds,
text_embeds,
extend_text_masks,
irtr_len_text=image_embeds.size(0),
)["cls_feats"]
)[:, 1]
txt_batch_score = torch.full((sims_matrix.size(0),), -100.0, dtype=score.dtype,device=pl_module.device)
txt_batch_score[topk_idx_i] = score
txt_rank_scores.append(txt_batch_score)
txt_rank_scores = torch.cat(txt_rank_scores, dim=0)
if torch.distributed.is_initialized():
torch.distributed.barrier()
iids = pl_module.all_gather(rank_iids).to(pl_module.device).view(-1)
img_scores = pl_module.all_gather(rank_scores).to(pl_module.device).view(len(iids), -1)
txt_scores = pl_module.all_gather(txt_rank_scores).to(pl_module.device).view(-1, len(iids)).t()
scores = torch.stack((img_scores, txt_scores), dim=-1)
scores = torch.max(scores, dim=-1)[0]
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
del text_embeds_cache, extend_text_masks_cache, image_embeds_cache
if pl_module.hparams.config["gather_all_image_inputs"]:
del img_input_cache
import gc
gc.collect()
torch.cuda.empty_cache()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10, ir_mean, tr_mean, r_mean)
@torch.no_grad()
def compute_irtr_itc_recall(pl_module, split):
print("[Evaluation] load irtr dataset for text features caching")
torch.cuda.empty_cache()
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split)
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_dist_sampler = DistributedSampler(text_dset, shuffle=False)
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_text"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=text_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] load irtr dataset for image features caching")
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_dset(split, image_only=True)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_dist_sampler = DistributedSampler(image_dset, shuffle=False)
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=pl_module.hparams.config["per_gpu_eval_batchsize_image"],
num_workers=pl_module.hparams.config["num_workers"],
sampler=image_dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
shuffle=False,
)
print("[Evaluation] start to cache the text features")
unimodal_feats_text_cache, tiids = list(), list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(text_loader, desc="text prefetch loop"):
unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)[2]
unimodal_feats_text_cache.append(unimodal_feats_text)
tiids += _b["img_index"]
else:
for _b in tqdm(text_loader, desc="text prefetch loop"):
unimodal_feats_text = pl_module.infer_text(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
},
itc=True,
)[2]
unimodal_feats_text_cache.append(unimodal_feats_text)
tiids += _b["img_index"]
unimodal_feats_text_cache = torch.cat(unimodal_feats_text_cache, dim=0)
tiids = torch.LongTensor(tiids)
print("[Evaluation] gather all texts")
if torch.distributed.is_initialized():
torch.distributed.barrier()
unimodal_feats_text_cache = pl_module.all_gather(unimodal_feats_text_cache).view(-1, unimodal_feats_text_cache.size(1)).to(pl_module.device)
tiids = pl_module.all_gather(tiids).to(pl_module.device).view(-1)
print("[Evaluation] start to cache the image features")
unimodal_feats_image_cache, iids_cache = list(), list()
if pl_module.hparams.config["amp_flag"]:
with torch.cuda.amp.autocast():
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)[1]
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
else:
for _b in tqdm(image_loader, desc="image prefetch loop"):
img_input = _b["image"][0].to(pl_module.device)
unimodal_feats_image = pl_module.infer_image(img=img_input, itc=True)[1]
unimodal_feats_image_cache.append(unimodal_feats_image)
iids_cache += _b["img_index"]
unimodal_feats_image_cache = torch.cat(unimodal_feats_image_cache, dim=0)
torch.cuda.empty_cache()
print("[Evaluation] start to compute the itc recall")
sims_matrix = unimodal_feats_image_cache @ unimodal_feats_text_cache.t()
rank_iids = torch.LongTensor(iids_cache)
if torch.distributed.is_initialized():
torch.distributed.barrier()
sims_matrix = pl_module.all_gather(sims_matrix).view(-1, sims_matrix.size(1)).to(pl_module.device)
iids = pl_module.all_gather(rank_iids).to(pl_module.device).view(-1)
scores = sims_matrix
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
r_mean = (tr_mean + ir_mean) / 2
torch.cuda.empty_cache()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10, ir_mean, tr_mean, r_mean)
## Save VQA test results to a JSON file; you can then manually upload it to the EvalAI server
def vqa_test_step(pl_module, batch, output):
try:
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
)
except:
id2answer = (
pl_module.trainer.datamodule.dm_dicts["gqa_test"].id2answer
if "gqa_test" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["gqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds, "gqa": True}
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds, "gqa": False}
def vqa_test_wrapup(outs, model_name, log_dir):
rank = torch.distributed.get_rank()
qids, preds = list(), list()
gqa = False
for out in outs:
qids += out["qids"]
preds += out["preds"]
gqa = out['gqa']
rets = list()
for qid, pred in zip(qids, preds):
if gqa:
rets.append({"questionId": qid, "prediction": pred})
else:
rets.append({"question_id": qid, "answer": pred})
if torch.distributed.is_initialized():
torch.distributed.barrier()
print(f'rank: {rank}, world_size: {dist.get_world_size()}, length of rets: {len(rets)}')
gather_rets = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(gather_rets, rets)
print(f'rank: {rank}, length of gather_rets: {len(gather_rets)}')
print(f'rank: {rank}, length of gather_rets[0]: {len(gather_rets[0])}')
if rank == 0:
jsons = list()
for rets_ in gather_rets:
jsons += rets_
with open(f"{log_dir}/vqa_submit_{model_name}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
if torch.distributed.is_initialized():
torch.distributed.barrier()
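# --- Illustrative note (assumption; not part of the original file, never called) ---
# Based on the keys used in vqa_test_wrapup above, the submission JSON is a flat list
# of per-question rows. The hypothetical helper below just shows what one row of each
# flavour looks like; the ids and answers are made up for illustration only.
def _submission_row_examples():
    vqa_row = {"question_id": 262148000, "answer": "yes"}          # VQA-style entry
    gqa_row = {"questionId": "201307251", "prediction": "bottle"}  # GQA-style entry
    return vqa_row, gqa_row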
|
BridgeTower/src/modules/objectives.py/0
|
{
"file_path": "BridgeTower/src/modules/objectives.py",
"repo_id": "BridgeTower",
"token_count": 41224
}
| 156 |
import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from tqdm.contrib import tzip
from glob import glob
def path2rest(path, iid2captions):
split, _, name = path.split("/")[-3:]
split = split.split("_")[-1]
iid = name
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[iid]
return [
binary,
captions,
iid,
split,
]
def make_arrow(root, dataset_root):
with open(f"{root}/sbu-captions-all.json", "r") as fp:
data = json.load(fp)
captions = data['captions']
urls = data['image_urls']
iid2captions = dict()
for cap, url in tzip(captions, urls):
iid = url.split("/")[-1]
iid2captions[iid] = [cap]
paths = list(glob(f"{root}/SBU/*"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
sub_len = int(len(caption_paths) // 100000)
subs = list(range(sub_len + 1))
for sub in subs:
sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
dataframe = pd.DataFrame(bs, columns=["image", "caption", "image_id", "split"],)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/sbu_{sub}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
del dataframe
del table
del bs
gc.collect()
make_arrow('~/BT/dataset/sbu', '~/BT/dataset/pre-train')
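# --- Illustrative sketch (assumption; not part of the original script, never called) ---
# One way to sanity-check a shard written above is to read it back with pyarrow's IPC
# file reader (pa and os come from the module-level imports above). The path and the
# helper name below are hypothetical.
def _read_sbu_shard_example(path="~/BT/dataset/pre-train/sbu_0.arrow"):
    with pa.OSFile(os.path.expanduser(path), "rb") as source:
        table = pa.ipc.open_file(source).read_all()
    # Columns match the writer above: image (binary), caption, image_id, split.
    return table.to_pandas()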
|
BridgeTower/src/utils/write_sbu.py/0
|
{
"file_path": "BridgeTower/src/utils/write_sbu.py",
"repo_id": "BridgeTower",
"token_count": 900
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer
class ConvEncoder(BaseNetwork):
""" Same architecture as the image discriminator """
def __init__(self, opt):
super().__init__()
kw = 3
pw = int(np.ceil((kw - 1.0) / 2))
ndf = opt.ngf
norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw))
self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw))
self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw))
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw))
self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))
if opt.crop_size >= 256:
self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))
self.so = s0 = 4
self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256)
self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256)
self.actvn = nn.LeakyReLU(0.2, False)
self.opt = opt
def forward(self, x):
if x.size(2) != 256 or x.size(3) != 256:
x = F.interpolate(x, size=(256, 256), mode="bilinear")
x = self.layer1(x)
x = self.layer2(self.actvn(x))
x = self.layer3(self.actvn(x))
x = self.layer4(self.actvn(x))
x = self.layer5(self.actvn(x))
if self.opt.crop_size >= 256:
x = self.layer6(self.actvn(x))
x = self.actvn(x)
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logvar = self.fc_var(x)
return mu, logvar
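# --- Illustrative sketch (assumption; not part of the original encoder, never called) ---
# ConvEncoder.forward returns the parameters of a diagonal Gaussian. In a VAE-style
# pipeline they are typically turned into a latent code with the reparameterization
# trick sketched below; the helper name is hypothetical.
def _reparameterize_example(mu, logvar):
    import torch
    std = torch.exp(0.5 * logvar)   # logvar -> standard deviation
    eps = torch.randn_like(std)     # fresh noise for every call
    return mu + eps * std           # z ~ N(mu, sigma^2), differentiable w.r.t. mu and logvar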
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/encoder.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/encoder.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 942
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
def create_model(opt):
if opt.model == "pix2pixHD":
from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
if opt.isTrain:
model = Pix2PixHDModel()
else:
model = InferenceModel()
else:
from .ui_model import UIModel
model = UIModel()
model.initialize(opt)
if opt.verbose:
print("model [%s] was created" % (model.name()))
if opt.isTrain and len(opt.gpu_ids) > 1:
# pass
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
return model
def create_da_model(opt):
if opt.model == 'pix2pixHD':
from .pix2pixHD_model_DA import Pix2PixHDModel, InferenceModel
if opt.isTrain:
model = Pix2PixHDModel()
else:
model = InferenceModel()
else:
from .ui_model import UIModel
model = UIModel()
model.initialize(opt)
if opt.verbose:
print("model [%s] was created" % (model.name()))
if opt.isTrain and len(opt.gpu_ids) > 1:
#pass
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
return model
|
Bringing-Old-Photos-Back-to-Life/Global/models/models.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/models/models.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 560
}
| 159 |
# Ke Chen
# [email protected]
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
# Model Core
# the code below is based on and adapted from https://github.com/microsoft/Swin-Transformer
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
import math
import random
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from itertools import repeat
from .pytorch_utils import do_mixup, interpolate
from . import config
import collections.abc
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
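# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# A quick way to see what drop_path does: with drop_prob=0.5 roughly half of the
# samples in the batch are zeroed out, and the survivors are scaled by 1/keep_prob so
# the expected value of the output matches the input.
def _drop_path_example():
    x = torch.ones(8, 4)                               # 8 samples, 4 features each
    out = drop_path(x, drop_prob=0.5, training=True)   # each row is all zeros or all 2.0
    return out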
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, patch_stride = 16):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patch_stride = to_2tuple(patch_stride)
self.img_size = img_size
self.patch_size = patch_size
self.patch_stride = patch_stride
self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.in_chans = in_chans
self.embed_dim = embed_dim
padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
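# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# With the HTSAT defaults used further below (spec_size=256, patch_size=4,
# patch_stride=(4, 4), in_chans=1, embed_dim=96), PatchEmbed turns a (B, 1, 256, 256)
# spectrogram into a (B, 64*64, 96) token sequence.
def _patch_embed_shape_example():
    pe = PatchEmbed(img_size=256, patch_size=4, in_chans=1, embed_dim=96, patch_stride=4)
    tokens = pe(torch.zeros(2, 1, 256, 256))
    return tokens.shape  # expected: torch.Size([2, 4096, 96])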
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
elif mode == 'fan_avg':
denom = (fan_in + fan_out) / 2
variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
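# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# variance_scaling_ derives the std from fan_in/fan_out: for a (256, 128) weight with
# mode='fan_in' the variance is scale / 128, i.e. std = sqrt(1/128) ~= 0.088. The
# helper below just checks that the empirical std lands in that ballpark.
def _variance_scaling_example():
    w = torch.empty(256, 128)
    variance_scaling_(w, scale=1.0, mode='fan_in', distribution='normal')
    return w.std().item(), math.sqrt(1.0 / 128)  # both approximately 0.088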
# the code below is based on and adapted from https://github.com/microsoft/Swin-Transformer
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
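# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# window_partition and window_reverse are inverses of each other. For a (2, 8, 8, 3)
# feature map with window_size=4 the partition yields (2 * 2 * 2, 4, 4, 3) = (8, 4, 4, 3)
# windows, and reversing them restores the original tensor exactly.
def _window_roundtrip_example():
    x = torch.randn(2, 8, 8, 3)
    windows = window_partition(x, window_size=4)      # (8, 4, 4, 3)
    restored = window_reverse(windows, 4, H=8, W=8)   # (2, 8, 8, 3)
    return torch.allclose(x, restored)                # expected: True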
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
def extra_repr(self):
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
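# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# The relative_position_index built in WindowAttention.__init__ maps every ordered pair
# of tokens inside a window to a row of the bias table. For a 2x2 window the same
# recipe, repeated below step by step, yields a 4x4 index matrix with values in [0, 9)
# (there are (2*2-1) * (2*2-1) = 9 distinct relative offsets).
def _relative_position_index_example(window_size=(2, 2)):
    coords_h = torch.arange(window_size[0])
    coords_w = torch.arange(window_size[1])
    coords = torch.stack(torch.meshgrid([coords_h, coords_w]))      # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)                       # 2, Wh*Ww
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
    relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += window_size[0] - 1                  # shift to start from 0
    relative_coords[:, :, 1] += window_size[1] - 1
    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
    return relative_coords.sum(-1)                                  # (Wh*Ww) x (Wh*Ww) index matrix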
# We build the model on the Swin Transformer block, so pretrained Swin Transformer weights can be reused
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.norm_before_mlp = norm_before_mlp
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if self.norm_before_mlp == 'ln':
self.norm2 = nn.LayerNorm(dim)
elif self.norm_before_mlp == 'bn':
self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(1, 2)
else:
raise NotImplementedError
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
# pdb.set_trace()
H, W = self.input_resolution
# print("H: ", H)
# print("W: ", W)
# pdb.set_trace()
B, L, C = x.shape
# assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows, attn = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
def extra_repr(self):
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self):
return f"input_resolution={self.input_resolution}, dim={self.dim}"
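# --- Illustrative sketch (assumption; not part of the original module, never called) ---
# PatchMerging halves the spatial resolution and doubles the channel count: a
# (B, H*W, C) sequence becomes (B, (H/2)*(W/2), 2*C). The toy sizes below are made up.
def _patch_merging_shape_example():
    merge = PatchMerging(input_resolution=(8, 8), dim=96)
    out = merge(torch.randn(2, 64, 96))
    return out.shape  # expected: torch.Size([2, 16, 192])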
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
attns = []
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x, attn = blk(x)
if not self.training:
attns.append(attn.unsqueeze(0))
if self.downsample is not None:
x = self.downsample(x)
if not self.training:
attn = torch.cat(attns, dim = 0)
attn = torch.mean(attn, dim = 0)
return x, attn
def extra_repr(self):
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
# The Core of HTSAT
class HTSAT_Swin_Transformer(nn.Module):
r"""HTSAT based on the Swin Transformer
Args:
spec_size (int | tuple(int)): Input Spectrogram size. Default 256
patch_size (int | tuple(int)): Patch size. Default: 4
        patch_stride (int | tuple(int)): Patch stride for the frequency and time axes. Default: 4
in_chans (int): Number of input image channels. Default: 1 (mono)
num_classes (int): Number of classes for classification head. Default: 527
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 8
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
config (module): The configuration Module from config.py
"""
def __init__(self, spec_size=256, patch_size=4, patch_stride=(4,4),
in_chans=1, num_classes=527,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
ape=False, patch_norm=True,
use_checkpoint=False, norm_before_mlp='ln', config = None, **kwargs):
super(HTSAT_Swin_Transformer, self).__init__()
self.config = config
self.spec_size = spec_size
self.patch_stride = patch_stride
self.patch_size = patch_size
self.window_size = window_size
self.embed_dim = embed_dim
self.depths = depths
self.ape = ape
self.in_chans = in_chans
self.num_classes = num_classes
self.num_heads = num_heads
self.num_layers = len(self.depths)
self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.qkv_bias = qkv_bias
self.qk_scale = None
self.patch_norm = patch_norm
self.norm_layer = norm_layer if self.patch_norm else None
self.norm_before_mlp = norm_before_mlp
self.mlp_ratio = mlp_ratio
self.use_checkpoint = use_checkpoint
# process mel-spec ; used only once
self.freq_ratio = self.spec_size // self.config.mel_bins
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.interpolate_ratio = 32 # Downsampled ratio
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size,
win_length=config.window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size,
n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2) # 2 2
self.bn0 = nn.BatchNorm2d(self.config.mel_bins)
        # split spectrogram into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride = patch_stride)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.grid_size
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=self.drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=self.depths[i_layer],
num_heads=self.num_heads[i_layer],
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate,
drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
norm_layer=self.norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
norm_before_mlp=self.norm_before_mlp)
self.layers.append(layer)
self.norm = self.norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.maxpool = nn.AdaptiveMaxPool1d(1)
if self.config.enable_tscam:
SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio
self.tscam_conv = nn.Conv2d(
in_channels = self.num_features,
out_channels = self.num_classes,
kernel_size = (SF,3),
padding = (0,1)
)
self.head = nn.Linear(num_classes, num_classes)
else:
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
frames_num = x.shape[2]
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for i, layer in enumerate(self.layers):
x, attn = layer(x)
if self.config.enable_tscam:
# for x
x = self.norm(x)
B, N, C = x.shape
SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
x = x.permute(0,2,1).contiguous().reshape(B, C, SF, ST)
B, C, F, T = x.shape
# group 2D CNN
c_freq_bin = F // self.freq_ratio
x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
x = x.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1)
# get latent_output
latent_output = self.avgpool(torch.flatten(x,2))
latent_output = torch.flatten(latent_output, 1)
# display the attention map, if needed
if self.config.htsat_attn_heatmap:
# for attn
attn = torch.mean(attn, dim = 1)
attn = torch.mean(attn, dim = 1)
attn = attn.reshape(B, SF, ST)
c_freq_bin = SF // self.freq_ratio
attn = attn.reshape(B, SF // c_freq_bin, c_freq_bin, ST)
attn = attn.permute(0,2,1,3).contiguous().reshape(B, c_freq_bin, -1)
attn = attn.mean(dim = 1)
attn_max = torch.max(attn, dim = 1, keepdim = True)[0]
attn_min = torch.min(attn, dim = 1, keepdim = True)[0]
attn = ((attn * 0.15) + (attn_max * 0.85 - attn_min)) / (attn_max - attn_min)
attn = attn.unsqueeze(dim = 2)
x = self.tscam_conv(x)
x = torch.flatten(x, 2) # B, C, T
if self.config.htsat_attn_heatmap:
fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous() * attn, 8 * self.patch_stride[1])
else:
fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous(), 8 * self.patch_stride[1])
x = self.avgpool(x)
x = torch.flatten(x, 1)
if self.config.loss_type == "clip_ce":
output_dict = {
'framewise_output': fpx, # already sigmoided
'clipwise_output': x,
'latent_output': latent_output
}
else:
output_dict = {
'framewise_output': fpx, # already sigmoided
'clipwise_output': torch.sigmoid(x),
'latent_output': latent_output
}
else:
x = self.norm(x) # B N C
B, N, C = x.shape
fpx = x.permute(0,2,1).contiguous().reshape(B, C, frames_num // (2 ** (len(self.depths) + 1)), frames_num // (2 ** (len(self.depths) + 1)) )
B, C, F, T = fpx.shape
c_freq_bin = F // self.freq_ratio
fpx = fpx.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
fpx = fpx.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1)
fpx = torch.sum(fpx, dim = 2)
fpx = interpolate(fpx.permute(0,2,1).contiguous(), 8 * self.patch_stride[1])
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
if self.num_classes > 0:
x = self.head(x)
fpx = self.head(fpx)
output_dict = {'framewise_output': torch.sigmoid(fpx),
'clipwise_output': torch.sigmoid(x)}
return output_dict
def crop_wav(self, x, crop_size, spe_pos = None):
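        # crop a fixed-length window along the time axis for each item; the start is random unless spe_pos is given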
time_steps = x.shape[2]
tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
for i in range(len(x)):
if spe_pos is None:
crop_pos = random.randint(0, time_steps - crop_size - 1)
else:
crop_pos = spe_pos
tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size,:]
return tx
    # Reshape the waveform to an image size, if you want to use the pretrained swin transformer model
def reshape_wav2img(self, x):
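        # split the time axis into freq_ratio chunks and stack them along the frequency axis,
        # so the spectrogram becomes a roughly square spec_size x spec_size image for the Swin backbone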
B, C, T, F = x.shape
target_T = int(self.spec_size * self.freq_ratio)
target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
# to avoid bicubic zero error
if T < target_T:
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
if F < target_F:
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
x = x.permute(0,1,3,2).contiguous()
x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio)
# print(x.shape)
x = x.permute(0,1,3,2,4).contiguous()
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
return x
    # Repeat the waveform to an image size, if you want to use the pretrained swin transformer model
def repeat_wat2img(self, x, cur_pos):
B, C, T, F = x.shape
target_T = int(self.spec_size * self.freq_ratio)
target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
# to avoid bicubic zero error
if T < target_T:
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
if F < target_F:
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
x = x.permute(0,1,3,2).contiguous() # B C F T
x = x[:,:,:,cur_pos:cur_pos + self.spec_size]
x = x.repeat(repeats = (1,1,4,1))
return x
def forward(self, x: torch.Tensor, mixup_lambda = None, infer_mode = False):# out_feat_keys: List[str] = None):
x = self.spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
if infer_mode:
            # in infer mode, we need to handle audio inputs of different lengths
frame_num = x.shape[2]
target_T = int(self.spec_size * self.freq_ratio)
repeat_ratio = math.floor(target_T / frame_num)
x = x.repeat(repeats=(1,1,repeat_ratio,1))
x = self.reshape_wav2img(x)
output_dict = self.forward_features(x)
elif self.config.enable_repeat_mode:
if self.training:
cur_pos = random.randint(0, (self.freq_ratio - 1) * self.spec_size - 1)
x = self.repeat_wat2img(x, cur_pos)
output_dict = self.forward_features(x)
else:
output_dicts = []
for cur_pos in range(0, (self.freq_ratio - 1) * self.spec_size + 1, self.spec_size):
tx = x.clone()
tx = self.repeat_wat2img(tx, cur_pos)
output_dicts.append(self.forward_features(tx))
clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
for d in output_dicts:
clipwise_output += d["clipwise_output"]
framewise_output += d["framewise_output"]
clipwise_output = clipwise_output / len(output_dicts)
framewise_output = framewise_output / len(output_dicts)
output_dict = {
'framewise_output': framewise_output,
'clipwise_output': clipwise_output
}
else:
if x.shape[2] > self.freq_ratio * self.spec_size:
if self.training:
x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
x = self.reshape_wav2img(x)
output_dict = self.forward_features(x)
else:
# Change: Hard code here
overlap_size = 344 #(x.shape[2] - 1) // 4
output_dicts = []
crop_size = 689 #(x.shape[2] - 1) // 2
for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos)
tx = self.reshape_wav2img(tx)
output_dicts.append(self.forward_features(tx))
clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
latent_output = torch.zeros_like(output_dicts[0]["latent_output"]).float().to(x.device)
for d in output_dicts:
clipwise_output += d["clipwise_output"]
framewise_output += d["framewise_output"]
latent_output += d["latent_output"]
clipwise_output = clipwise_output / len(output_dicts)
framewise_output = framewise_output / len(output_dicts)
latent_output = latent_output / len(output_dicts)
output_dict = {
'framewise_output': framewise_output,
'clipwise_output': clipwise_output,
'latent_output': latent_output,
}
            else: # this is the typical path, and the simplest one
x = self.reshape_wav2img(x)
output_dict = self.forward_features(x)
# x = self.head(x)
return output_dict
class HTSATWrapper(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, out_emb):
super().__init__()
# print("parameters are being overidden when using HTSAT")
# print("HTSAT only support loading a pretrained model on AudioSet")
# @TODO later look at what parameters are same and can be merged
self.htsat = HTSAT_Swin_Transformer(config=config)
def forward(self, x):
out_dict = self.htsat(x)
out_dict['embedding'] = out_dict['latent_output']
return out_dict
|
CLAP/msclap/models/htsat.py/0
|
{
"file_path": "CLAP/msclap/models/htsat.py",
"repo_id": "CLAP",
"token_count": 19803
}
| 160 |
Evaluating Pre-trained Models
=============================
First, download a pre-trained model along with its vocabularies:
.. code-block:: console
> curl https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2 | tar xvjf -
This model uses a `Byte Pair Encoding (BPE)
vocabulary <https://arxiv.org/abs/1508.07909>`__, so we'll have to apply
the encoding to the source text before it can be translated. This can be
done with the
`apply\_bpe.py <https://github.com/rsennrich/subword-nmt/blob/master/subword_nmt/apply_bpe.py>`__
script using the ``wmt14.en-fr.fconv-cuda/bpecodes`` file. ``@@`` is
used as a continuation marker and the original text can be easily
recovered with e.g. ``sed s/@@ //g`` or by passing the ``--remove-bpe``
flag to :ref:`fairseq-generate`. Prior to BPE, input text needs to be tokenized
using ``tokenizer.perl`` from
`mosesdecoder <https://github.com/moses-smt/mosesdecoder>`__.
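The same pre-processing can also be sketched in Python with the ``sacremoses``
and ``subword_nmt`` packages (a minimal sketch; both packages and the path to
``bpecodes`` are assumptions based on the model download above):

.. code-block:: python

    import re
    from sacremoses import MosesTokenizer
    from subword_nmt.apply_bpe import BPE

    # tokenize with the Moses tokenizer, then apply the released BPE codes
    tokenizer = MosesTokenizer(lang="en")
    with open("wmt14.en-fr.fconv-py/bpecodes") as codes:
        bpe = BPE(codes)

    sentence = "Why is it rare to discover new marine mammal species?"
    tokenized = tokenizer.tokenize(sentence, return_str=True)
    encoded = bpe.process_line(tokenized)  # subword units are joined with "@@ "
    print(encoded)

    # the tokenized text can be recovered by stripping the continuation markers
    print(re.sub(r"@@ ", "", encoded))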
Let's use :ref:`fairseq-interactive` to generate translations interactively.
Here, we use a beam size of 5 and preprocess the input with the Moses
tokenizer and the given Byte-Pair Encoding vocabulary. It will automatically
remove the BPE continuation markers and detokenize the output.
.. code-block:: console
> MODEL_DIR=wmt14.en-fr.fconv-py
> fairseq-interactive \
--path $MODEL_DIR/model.pt $MODEL_DIR \
--beam 5 --source-lang en --target-lang fr \
--tokenizer moses \
--bpe subword_nmt --bpe-codes $MODEL_DIR/bpecodes
| loading model(s) from wmt14.en-fr.fconv-py/model.pt
| [en] dictionary: 44206 types
| [fr] dictionary: 44463 types
| Type the input sentence and press return:
Why is it rare to discover new marine mammal species?
S-0 Why is it rare to discover new marine mam@@ mal species ?
H-0 -0.0643349438905716 Pourquoi est-il rare de découvrir de nouvelles espèces de mammifères marins?
P-0 -0.0763 -0.1849 -0.0956 -0.0946 -0.0735 -0.1150 -0.1301 -0.0042 -0.0321 -0.0171 -0.0052 -0.0062 -0.0015
This generation script produces three types of outputs: a line prefixed
with *S* shows the source sentence after pre-processing; *H* is the
hypothesis along with an average log-likelihood; and *P* gives the
positional score for each token position, including the
end-of-sentence marker, which is omitted from the printed text.
Other types of output lines you might see are *D*, the detokenized hypothesis;
*T*, the reference target; *A*, alignment info; and *E*, the history of generation steps.
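As a rough illustration of how these prefixed lines can be consumed
programmatically, the sketch below groups a saved generation log by prefix
(the file name ``gen.out`` is only an example):

.. code-block:: python

    from collections import defaultdict

    records = defaultdict(list)
    with open("gen.out") as f:
        for line in f:
            if "\t" not in line:
                continue  # skip plain log lines that carry no S/T/H/D/P/A/E record
            prefix, rest = line.rstrip("\n").split("\t", 1)
            kind, _, sent_id = prefix.partition("-")  # e.g. "H-721" -> ("H", "721")
            records[kind].append((sent_id, rest))

    # e.g. print the first few hypotheses together with their scores
    for sent_id, rest in records["H"][:3]:
        print(sent_id, rest)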
See the `README <https://github.com/pytorch/fairseq#pre-trained-models>`__ for a
full list of pre-trained models available.
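Many of these pre-trained models can also be loaded through ``torch.hub``; the
following is a minimal sketch (the hub entry name and the ``sacremoses`` /
``subword_nmt`` dependencies are assumptions to check against the README):

.. code-block:: python

    import torch

    # downloads the pre-trained convolutional WMT'14 En-Fr model on first use
    en2fr = torch.hub.load(
        "pytorch/fairseq", "conv.wmt14.en-fr",
        tokenizer="moses", bpe="subword_nmt",
    )
    en2fr.eval()
    print(en2fr.translate("Why is it rare to discover new marine mammal species?"))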
Training a New Model
====================
The following tutorial is for machine translation. For an example of how
to use Fairseq for other tasks, such as :ref:`language modeling`, please see the
``examples/`` directory.
Data Pre-processing
-------------------
Fairseq contains example pre-processing scripts for several translation
datasets: IWSLT 2014 (German-English), WMT 2014 (English-French) and WMT
2014 (English-German). To pre-process and binarize the IWSLT dataset:
.. code-block:: console
> cd examples/translation/
> bash prepare-iwslt14.sh
> cd ../..
> TEXT=examples/translation/iwslt14.tokenized.de-en
> fairseq-preprocess --source-lang de --target-lang en \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/iwslt14.tokenized.de-en
This will write binarized data that can be used for model training to
``data-bin/iwslt14.tokenized.de-en``.
Training
--------
Use :ref:`fairseq-train` to train a new model. Here are a few example settings that work
well for the IWSLT 2014 dataset:
.. code-block:: console
> mkdir -p checkpoints/fconv
> CUDA_VISIBLE_DEVICES=0 fairseq-train data-bin/iwslt14.tokenized.de-en \
--optimizer nag --lr 0.25 --clip-norm 0.1 --dropout 0.2 --max-tokens 4000 \
--arch fconv_iwslt_de_en --save-dir checkpoints/fconv
By default, :ref:`fairseq-train` will use all available GPUs on your machine. Use the
``CUDA_VISIBLE_DEVICES`` environment variable to select specific GPUs and/or to
change the number of GPU devices that will be used.
Also note that the batch size is specified in terms of the maximum
number of tokens per batch (``--max-tokens``). You may need to use a
smaller value depending on the available GPU memory on your system.
Generation
----------
Once your model is trained, you can generate translations using
:ref:`fairseq-generate` **(for binarized data)** or
:ref:`fairseq-interactive` **(for raw text)**:
.. code-block:: console
> fairseq-generate data-bin/iwslt14.tokenized.de-en \
--path checkpoints/fconv/checkpoint_best.pt \
--batch-size 128 --beam 5
| [de] dictionary: 35475 types
| [en] dictionary: 24739 types
| data-bin/iwslt14.tokenized.de-en test 6750 examples
| model fconv
| loaded checkpoint trainings/fconv/checkpoint_best.pt
S-721 danke .
T-721 thank you .
...
To generate translations with only a CPU, use the ``--cpu`` flag. BPE
continuation markers can be removed with the ``--remove-bpe`` flag.
Advanced Training Options
=========================
Large mini-batch training with delayed updates
----------------------------------------------
The ``--update-freq`` option can be used to accumulate gradients from
multiple mini-batches and delay updating, creating a larger effective
batch size. Delayed updates can also improve training speed by reducing
inter-GPU communication costs and by saving idle time caused by variance
in workload across GPUs. See `Ott et al.
(2018) <https://arxiv.org/abs/1806.00187>`__ for more details.
To train on a single GPU with an effective batch size that is equivalent
to training on 8 GPUs:
.. code-block:: console
> CUDA_VISIBLE_DEVICES=0 fairseq-train --update-freq 8 (...)
Training with half precision floating point (FP16)
--------------------------------------------------
.. note::
FP16 training requires a Volta GPU and CUDA 9.1 or greater
Recent GPUs enable efficient half precision floating point computation,
e.g., using `Nvidia Tensor Cores
<https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html>`__.
Fairseq supports FP16 training with the ``--fp16`` flag:
.. code-block:: console
> fairseq-train --fp16 (...)
Distributed training
--------------------
Distributed training in fairseq is implemented on top of ``torch.distributed``.
The easiest way to launch jobs is with the `torch.distributed.launch
<https://pytorch.org/docs/stable/distributed.html#launch-utility>`__ tool.
For example, to train a large English-German Transformer model on 2 nodes each
with 8 GPUs (in total 16 GPUs), run the following command on each node,
replacing ``node_rank=0`` with ``node_rank=1`` on the second node and making
sure to update ``--master_addr`` to the IP address of the first node:
.. code-block:: console
> python -m torch.distributed.launch --nproc_per_node=8 \
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1" \
--master_port=12345 \
$(which fairseq-train) data-bin/wmt16_en_de_bpe32k \
--arch transformer_vaswani_wmt_en_de_big --share-all-embeddings \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 4000 \
--lr 0.0005 \
--dropout 0.3 --weight-decay 0.0 --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--max-tokens 3584 \
--max-epoch 70 \
--fp16
On SLURM clusters, fairseq will automatically detect the number of nodes and
GPUs, but a port number must be provided:
.. code-block:: console
> salloc --gpus=16 --nodes 2 (...)
> srun fairseq-train --distributed-port 12345 (...).
Sharding very large datasets
----------------------------
It can be challenging to train over very large datasets, particularly if your
machine does not have much system RAM. Most tasks in fairseq support training
over "sharded" datasets, in which the original dataset has been preprocessed
into non-overlapping chunks (or "shards").
For example, instead of preprocessing all your data into a single "data-bin"
directory, you can split the data and create "data-bin1", "data-bin2", etc.
Then you can adapt your training command like so:
.. code-block:: console
> fairseq-train data-bin1:data-bin2:data-bin3 (...)
Training will now iterate over each shard, one by one, with each shard
corresponding to an "epoch", thus reducing system memory usage.
|
COCO-LM/fairseq/docs/getting_started.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/getting_started.rst",
"repo_id": "COCO-LM",
"token_count": 2848
}
| 161 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the current directory
cur_dir = os.path.dirname(__file__)
for file in os.listdir(cur_dir):
path = os.path.join(cur_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
mod_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module(__name__ + "." + mod_name)
|
COCO-LM/fairseq/examples/adaptive_span/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/adaptive_span/__init__.py",
"repo_id": "COCO-LM",
"token_count": 248
}
| 162 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models.bart import BARTModel
import argparse
XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3)
CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3)
@torch.no_grad()
def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs):
count = 1
# if n_obs is not None: bsz = min(bsz, n_obs)
with open(infile) as source, open(outfile, "w") as fout:
sline = source.readline().strip()
slines = [sline]
for sline in source:
if n_obs is not None and count > n_obs:
break
if count % bsz == 0:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
slines = []
slines.append(sline.strip())
count += 1
if slines != []:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
def main():
"""
Usage::
python examples/bart/summarize.py \
--model-dir $HOME/bart.large.cnn \
--model-file model.pt \
--src $HOME/data-bin/cnn_dm/test.source
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--model-file",
default="checkpoint_best.pt",
help="where in model_dir are weights saved",
)
parser.add_argument(
"--src", default="test.source", help="text to summarize", type=str
)
parser.add_argument(
"--out", default="test.hypo", help="where to save summaries", type=str
)
parser.add_argument("--bsz", default=32, help="where to save summaries", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument(
"--xsum-kwargs",
action="store_true",
default=False,
help="if true use XSUM_KWARGS else CNN_KWARGS",
)
args = parser.parse_args()
eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS
if args.model_dir == "pytorch/fairseq":
bart = torch.hub.load("pytorch/fairseq", args.model_file)
else:
bart = BARTModel.from_pretrained(
args.model_dir,
checkpoint_file=args.model_file,
data_name_or_path=args.model_dir,
)
bart = bart.eval()
if torch.cuda.is_available():
bart = bart.cuda().half()
generate(
bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs
)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/bart/summarize.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/bart/summarize.py",
"repo_id": "COCO-LM",
"token_count": 1484
}
| 163 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import numpy as np
DIM = 1024
def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
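    # cosine similarity between L2-normalized source and target embeddings; returns the
    # top-k target neighbours per source sentence (or the raw similarity matrix)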
target_ids = [tid for tid in target_embs]
source_mat = np.stack(source_embs.values(), axis=0)
normalized_source_mat = source_mat / np.linalg.norm(
source_mat, axis=1, keepdims=True
)
target_mat = np.stack(target_embs.values(), axis=0)
normalized_target_mat = target_mat / np.linalg.norm(
target_mat, axis=1, keepdims=True
)
sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
if return_sim_mat:
return sim_mat
neighbors_map = {}
for i, sentence_id in enumerate(source_embs):
idx = np.argsort(sim_mat[i, :])[::-1][:k]
neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
return neighbors_map
def load_embeddings(directory, LANGS):
sentence_embeddings = {}
sentence_texts = {}
for lang in LANGS:
sentence_embeddings[lang] = {}
sentence_texts[lang] = {}
lang_dir = f"{directory}/{lang}"
embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
for embed_file in embedding_files:
shard_id = embed_file.split(".")[-1]
embeddings = np.fromfile(embed_file, dtype=np.float32)
num_rows = embeddings.shape[0] // DIM
embeddings = embeddings.reshape((num_rows, DIM))
with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
for idx, line in enumerate(sentence_file):
sentence_id, sentence = line.strip().split("\t")
sentence_texts[lang][sentence_id] = sentence
sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
return sentence_embeddings, sentence_texts
def compute_accuracy(directory, LANGS):
sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
top_1_accuracy = {}
top1_str = " ".join(LANGS) + "\n"
for source_lang in LANGS:
top_1_accuracy[source_lang] = {}
top1_str += f"{source_lang} "
for target_lang in LANGS:
top1 = 0
top5 = 0
neighbors_map = compute_dist(
sentence_embeddings[source_lang], sentence_embeddings[target_lang]
)
for sentence_id, neighbors in neighbors_map.items():
if sentence_id == neighbors[0]:
top1 += 1
if sentence_id in neighbors[:5]:
top5 += 1
n = len(sentence_embeddings[target_lang])
top1_str += f"{top1/n} "
top1_str += "\n"
print(top1_str)
print(top1_str, file=open(f"{directory}/accuracy", "w"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Analyze encoder outputs")
parser.add_argument("directory", help="Source language corpus")
parser.add_argument("--langs", help="List of langs")
args = parser.parse_args()
langs = args.langs.split(",")
compute_accuracy(args.directory, langs)
|
COCO-LM/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py",
"repo_id": "COCO-LM",
"token_count": 1476
}
| 164 |
#!/bin/bash
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
URLS=(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip"
)
FILES=(
"wikitext-103-v1.zip"
)
for ((i=0;i<${#URLS[@]};++i)); do
file=${FILES[i]}
if [ -f $file ]; then
echo "$file already exists, skipping download"
else
url=${URLS[i]}
wget "$url"
if [ -f $file ]; then
echo "$url successfully downloaded."
else
echo "$url not successfully downloaded."
exit -1
fi
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
elif [ ${file: -4} == ".zip" ]; then
unzip $file
fi
fi
done
cd ..
|
COCO-LM/fairseq/examples/language_model/prepare-wikitext-103.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/language_model/prepare-wikitext-103.sh",
"repo_id": "COCO-LM",
"token_count": 425
}
| 165 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.tasks import register_task
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
from .loss.latent_depth import LatentLayersKLLoss, LatentLayersSparsityLoss
@register_task("multilingual_translation_latent_depth")
class MultilingualTranslationTaskLatentDepth(MultilingualTranslationTask):
"""A task for multiple translation with latent depth.
See `"Deep Transformer with Latent Depth"
(Li et al., 2020) <https://arxiv.org/pdf/2009.13102.pdf>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--encoder-latent-layer', action='store_true', help='latent layer selection in encoder')
parser.add_argument('--decoder-latent-layer', action='store_true', help='latent layer selection in decoder')
parser.add_argument('--target-layers', default=-1, type=int,
help='number of effective layers to learn; -1 means no constraint')
parser.add_argument('--sparsity-weight', default=0.0, type=float,
help='weight for sparsity loss')
parser.add_argument('--share-weight', default=0.0, type=float,
help='weight for sharing loss')
parser.add_argument('--soft-update', default=1, type=int,
help='number of updates with soft sampling')
parser.add_argument('--anneal-updates', default=1, type=int,
help='number of updates to anneal the KL loss weight')
parser.add_argument('--prior', default="uniform", type=str,
help='prior used for computing KL loss')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.src_langs, self.tgt_langs = zip(
*[(lang.split("-")[0], lang.split("-")[1]) for lang in args.lang_pairs]
)
if self.training and self.encoder_latent_layer:
assert self.args.share_encoders
if self.training and self.decoder_latent_layer:
assert self.args.share_decoders
if training or self.encoder_latent_layer or self.decoder_latent_layer:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
self.eval_lang_pairs = self.lang_pairs
self.model_lang_pairs = self.lang_pairs
if self.training and (self.encoder_latent_layer or self.decoder_latent_layer):
self.kl_loss = LatentLayersKLLoss(self.args)
self.sparsity_loss = LatentLayersSparsityLoss(self.args)
def _per_lang_pair_train_loss(
self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad
):
src, tgt = lang_pair.split("-")
if self.encoder_latent_layer:
src_lang_idx = self.src_lang_idx_dict[src]
model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
model.models[lang_pair].encoder.layer_select.hard_select = (
update_num > self.args.soft_update
)
if self.decoder_latent_layer:
tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
model.models[lang_pair].decoder.layer_select.hard_select = (
update_num > self.args.soft_update
)
loss, sample_size, logging_output = criterion(
model.models[lang_pair], sample[lang_pair]
)
if self.encoder_latent_layer:
none_samples = sum(
1 if x is None else 0
for x in model.models[lang_pair].encoder.layer_select.layer_samples
)
if none_samples == 0 or self.args.prior != "agged_posterior":
loss += self.kl_loss(
model.models[lang_pair].encoder.layer_select.layer_samples,
src_lang_idx,
update_num,
sample_size,
)
if self.decoder_latent_layer:
none_samples = sum(
1 if x is None else 0
for x in model.models[lang_pair].decoder.layer_select.layer_samples
)
if none_samples == 0 or self.args.prior != "agged_posterior":
loss += self.kl_loss(
model.models[lang_pair].decoder.layer_select.layer_samples,
tgt_lang_idx,
update_num,
sample_size,
)
if ignore_grad:
loss *= 0
if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num):
# need to retain the graph if sparsity loss needs to be added
loss.backward(retain_graph=True)
else:
optimizer.backward(loss)
return loss, sample_size, logging_output
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
agg_loss, agg_sample_size, agg_logging_output = super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
        # compute auxiliary loss from layer sparsity, based on all samples from all languages
if hasattr(self, "sparsity_loss") and self.sparsity_loss.is_valid(update_num):
sparsity_loss = 0
if self.encoder_latent_layer:
sparsity_loss += self.sparsity_loss(
next(
iter(model.models.values())
).encoder.layer_select.layer_samples,
update_num,
agg_sample_size,
)
if self.decoder_latent_layer:
sparsity_loss += self.sparsity_loss(
next(
iter(model.models.values())
).decoder.layer_select.layer_samples,
update_num,
agg_sample_size,
)
if sparsity_loss > 0:
optimizer.backward(sparsity_loss)
return agg_loss, agg_sample_size, agg_logging_output
def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
src, tgt = lang_pair.split("-")
if self.encoder_latent_layer:
src_lang_idx = self.src_lang_idx_dict[src]
model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
if self.decoder_latent_layer:
tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
loss, sample_size, logging_output = criterion(
model.models[lang_pair], sample[lang_pair]
)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
if self.encoder_latent_layer or self.decoder_latent_layer:
for model in models:
if self.encoder_latent_layer:
assert model.encoder.layer_select is not None
src_lang_idx = self.src_lang_idx_dict[self.args.source_lang]
model.encoder.set_lang_idx(src_lang_idx)
if self.decoder_latent_layer:
assert model.decoder.layer_select is not None
tgt_lang_idx = self.tgt_lang_idx_dict[self.args.target_lang]
model.decoder.set_lang_idx(tgt_lang_idx)
return super().inference_step(
generator, models, sample, prefix_tokens, constraints
)
@property
def encoder_latent_layer(self):
return (
hasattr(self.args, "encoder_latent_layer")
and self.args.encoder_latent_layer
)
@property
def decoder_latent_layer(self):
return (
hasattr(self.args, "decoder_latent_layer")
and self.args.decoder_latent_layer
)
@property
def src_lang_idx_dict(self):
return {lang: lang_idx for lang_idx, lang in enumerate(self.src_langs)}
@property
def tgt_lang_idx_dict(self):
return {lang: lang_idx for lang_idx, lang in enumerate(self.tgt_langs)}
|
COCO-LM/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/latent_depth/latent_depth_src/multilingual_translation_latent_depth.py",
"repo_id": "COCO-LM",
"token_count": 4132
}
| 166 |
# M2M-100 Tokenization
We apply different tokenization strategies for different languages following the existing literature. Here we provide tok.sh a tokenizer that can be used to reproduce our results.
To reproduce the results, follow these steps:
```
tgt_lang=...
reference_translation=...
cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp
cat $reference_translation | sh tok.sh $tgt_lang > ref
sacrebleu -tok 'none' ref < hyp
```
## Installation
Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh.
If you want to evaluate Arabic models, please follow the instructions provided here to install the Arabic normalizer: http://alt.qcri.org/tools/arabic-normalizer/
|
COCO-LM/fairseq/examples/m2m_100/tokenizers/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/tokenizers/README.md",
"repo_id": "COCO-LM",
"token_count": 207
}
| 167 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
from utils.dedup import deup
import sys
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
def get_directions(folder):
raw_files = glob.glob(f'{folder}/train*')
directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
return directions
def diff_list(lhs, rhs):
return set(lhs).difference(set(rhs))
def check_diff(
from_src_file, from_tgt_file,
to_src_file, to_tgt_file,
):
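    # count how many sentence pairs (and how many source/target sides) of the from_* split
    # also appear in the to_* (training) split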
seen_in_from = set()
seen_src_in_from = set()
seen_tgt_in_from = set()
from_count = 0
with open(from_src_file, encoding='utf-8') as fsrc, \
open(from_tgt_file, encoding='utf-8') as ftgt:
for s, t in zip(fsrc, ftgt):
seen_in_from.add((s, t))
seen_src_in_from.add(s)
seen_tgt_in_from.add(t)
from_count += 1
common = 0
common_src = 0
common_tgt = 0
to_count = 0
seen = set()
with open(to_src_file, encoding='utf-8') as fsrc, \
open(to_tgt_file, encoding='utf-8') as ftgt:
for s, t in zip(fsrc, ftgt):
to_count += 1
if (s, t) not in seen:
if (s, t) in seen_in_from:
common += 1
if s in seen_src_in_from:
common_src += 1
seen_src_in_from.remove(s)
if t in seen_tgt_in_from:
common_tgt += 1
seen_tgt_in_from.remove(t)
seen.add((s, t))
return common, common_src, common_tgt, from_count, to_count
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, required=True,
help="the data folder ")
parser.add_argument("--split", type=str, default='test',
help="split (valid, test) to check against training data")
parser.add_argument('--directions', type=str, default=None, required=False)
args = parser.parse_args()
if args.directions is None:
directions = set(get_directions(args.folder))
directions = sorted(directions)
else:
directions = args.directions.split(',')
directions = sorted(set(directions))
results = []
print(f'checking where {args.split} split data are in training')
print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
for direction in directions:
src, tgt = direction.split('-')
from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}'
from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}'
if not os.path.exists(from_src_file):
            # some test/valid data might be in reverse directions:
from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}'
from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}'
to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}'
to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}'
if not os.path.exists(to_src_file) or not os.path.exists(from_src_file):
continue
r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file)
results.append(r)
print(f'{direction}\t', '\t'.join(map(str, r)))
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py",
"repo_id": "COCO-LM",
"token_count": 1752
}
| 168 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def deup(src_file, tgt_file, src_file_out, tgt_file_out):
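    # stream the parallel src/tgt files and keep only the first occurrence of each (src, tgt) pair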
seen = set()
dup_count = 0
with open(src_file, encoding='utf-8') as fsrc, \
open(tgt_file, encoding='utf-8') as ftgt, \
open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \
open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out:
for s, t in zip(fsrc, ftgt):
if (s, t) not in seen:
fsrc_out.write(s)
ftgt_out.write(t)
seen.add((s, t))
else:
dup_count += 1
    print(f'number of duplicated pairs: {dup_count}')
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--src-file", type=str, required=True,
help="src file")
parser.add_argument("--tgt-file", type=str, required=True,
help="tgt file")
parser.add_argument("--src-file-out", type=str, required=True,
help="src ouptut file")
parser.add_argument("--tgt-file-out", type=str, required=True,
help="tgt ouput file")
args = parser.parse_args()
deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/multilingual/data_scripts/utils/dedup.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/utils/dedup.py",
"repo_id": "COCO-LM",
"token_count": 713
}
| 169 |
# Examples of Training scripts for Non-autoregressive Machine Translation models
### Non-autoregressive Transformer (NAT, Gu et al., 2017)
Note that we need to have an additional module to perform "length prediction" (`--length-loss-factor`) before generating the whole sequence.
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch nonautoregressive_transformer \
--noise full_mask \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--pred-length-offset \
--length-loss-factor 0.1 \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
### Fast Structured Decoding for Sequence Models (NAT-CRF, Sun et al., 2019)
Note that we implemented a low-rank approximated CRF model by setting `--crf-lowrank-approx=32` and `--crf-beam-approx=64` as described in the original paper. All other settings are the same as the vanilla NAT model.
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch nacrf_transformer \
--noise full_mask \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--pred-length-offset \
--length-loss-factor 0.1 \
--word-ins-loss-factor 0.5 \
--crf-lowrank-approx 32 \
--crf-beam-approx 64 \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
### Non-autoregressive Transformer with Iterative Refinement (iNAT, Lee et al., 2018)
Note that `--train-step` means how many iterations of refinement we used during training, and `--dae-ratio` controls the ratio of denoising auto-encoder training described in the original paper.
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch iterative_nonautoregressive_transformer \
--noise full_mask \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--pred-length-offset \
--length-loss-factor 0.1 \
--train-step 4 \
--dae-ratio 0.5 \
--stochastic-approx \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
### Insertion Transformer (InsT, Stern et al., 2019)
Note that we need to specify the "slot-loss" (uniform or balanced tree) described in the original paper. Here we use `--label-tau` to control the temperature.
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch insertion_transformer \
--noise random_delete \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
### Mask Predict (CMLM, Ghazvininejad et al., 2019)
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch cmlm_transformer \
--noise random_mask \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
### Levenshtein Transformer (LevT, Gu et al., 2019)
```bash
fairseq-train \
data-bin/wmt14_en_de_distill \
--save-dir checkpoints \
--ddp-backend=legacy_ddp \
--task translation_lev \
--criterion nat_loss \
--arch levenshtein_transformer \
--noise random_delete \
--share-all-embeddings \
--optimizer adam --adam-betas '(0.9,0.98)' \
--lr 0.0005 --lr-scheduler inverse_sqrt \
--stop-min-lr '1e-09' --warmup-updates 10000 \
--warmup-init-lr '1e-07' --label-smoothing 0.1 \
--dropout 0.3 --weight-decay 0.01 \
--decoder-learned-pos \
--encoder-learned-pos \
--apply-bert-init \
--log-format 'simple' --log-interval 100 \
--fixed-validation-seed 7 \
--max-tokens 8000 \
--save-interval-updates 10000 \
--max-update 300000
```
|
COCO-LM/fairseq/examples/nonautoregressive_translation/scripts.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/nonautoregressive_translation/scripts.md",
"repo_id": "COCO-LM",
"token_count": 2500
}
| 170 |
# Finetuning RoBERTa on RACE tasks
### 1) Download the data from RACE website (http://www.cs.cmu.edu/~glai1/data/race/)
### 2) Preprocess RACE data:
```bash
python ./examples/roberta/preprocess_RACE.py --input-dir <input-dir> --output-dir <extracted-data-dir>
./examples/roberta/preprocess_RACE.sh <extracted-data-dir> <output-dir>
```
### 3) Fine-tuning on RACE:
```bash
MAX_EPOCH=5 # Number of training epochs.
LR=1e-05 # Peak LR for fixed LR scheduler.
NUM_CLASSES=4
MAX_SENTENCES=1 # Batch size per GPU.
UPDATE_FREQ=8 # Accumulate gradients to simulate training on 8 GPUs.
DATA_DIR=/path/to/race-output-dir
ROBERTA_PATH=/path/to/roberta/model.pt
CUDA_VISIBLE_DEVICES=0,1 fairseq-train $DATA_DIR --ddp-backend=legacy_ddp \
--restore-file $ROBERTA_PATH \
--reset-optimizer --reset-dataloader --reset-meters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--task sentence_ranking \
--num-classes $NUM_CLASSES \
--init-token 0 --separator-token 2 \
--max-option-length 128 \
--max-positions 512 \
--shorten-method "truncate" \
--arch roberta_large \
--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
--criterion sentence_ranking \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
--clip-norm 0.0 \
--lr-scheduler fixed --lr $LR \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--batch-size $MAX_SENTENCES \
--required-batch-size-multiple 1 \
--update-freq $UPDATE_FREQ \
--max-epoch $MAX_EPOCH
```
**Note:**
a) As contexts in RACE are relatively long, we use a smaller batch size per GPU while increasing `--update-freq` to achieve a larger effective batch size.
b) The above cmd-args and hyperparams were tested on one Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory available to you, you can increase `--update-freq` and reduce `--batch-size`.
c) The settings in the above command are based on our hyperparam search within a fixed search space (for careful comparison across models). You might be able to find better metrics with a wider hyperparam search.
### 4) Evaluation:
```
DATA_DIR=/path/to/race-output-dir # data directory used during training
MODEL_PATH=/path/to/checkpoint_best.pt # path to the finetuned model checkpoint
PREDS_OUT=preds.tsv # output file path to save prediction
TEST_SPLIT=test # can be test (Middle) or test1 (High)
fairseq-validate \
$DATA_DIR \
--valid-subset $TEST_SPLIT \
--path $MODEL_PATH \
--batch-size 1 \
--task sentence_ranking \
--criterion sentence_ranking \
--save-predictions $PREDS_OUT
```
|
COCO-LM/fairseq/examples/roberta/README.race.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/README.race.md",
"repo_id": "COCO-LM",
"token_count": 1065
}
| 171 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from collections import defaultdict
from examples.simultaneous_translation.eval.eval_latency import LatencyScorer
from vizseq.scorers.bleu import BLEUScorer
from vizseq.scorers.meteor import METEORScorer
from vizseq.scorers.ter import TERScorer
DEFAULT_EOS = "</s>"
class SimulScorer(object):
def __init__(self, args):
self.tokenizer = args.tokenizer
self.output_dir = args.output
if args.output is not None:
self.output_files = {
"text": os.path.join(args.output, "text"),
"delay": os.path.join(args.output, "delay"),
"scores": os.path.join(args.output, "scores"),
}
else:
self.output_files = None
self.eos = DEFAULT_EOS
self.data = {"tgt": []}
self.reset()
def get_info(self):
return {"num_sentences": len(self)}
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--src-file', type=str, required=True,
help='Source input file')
parser.add_argument('--tgt-file', type=str, required=True,
help='Target reference file')
parser.add_argument('--tokenizer', default="13a", choices=["none", "13a"],
help='Tokenizer used for sacrebleu')
parser.add_argument('--output', type=str, default=None,
help='Path for output directory')
# fmt: on
def send_src(self, sent_id, *args):
raise NotImplementedError
def recv_hyp(self, sent_id, list_of_tokens):
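        # record each emitted token together with the current read step, so latency metrics can be computed later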
for token in list_of_tokens:
self.translations[sent_id].append((token, self.steps[sent_id]))
def reset(self):
self.steps = defaultdict(int)
self.translations = defaultdict(list)
def src_lengths(self):
raise NotImplementedError
def score(self):
translations = []
delays = []
for i in range(1 + max(self.translations.keys())):
translations += [" ".join(t[0] for t in self.translations[i][:-1])]
delays += [[t[1] for t in self.translations[i]]]
bleu_score = BLEUScorer(
sent_level=False,
corpus_level=True,
extra_args={"bleu_tokenizer": self.tokenizer},
).score(translations, [self.data["tgt"]])
ter_score = TERScorer(sent_level=False, corpus_level=True).score(
translations, [self.data["tgt"]]
)
meteor_score = METEORScorer(sent_level=False, corpus_level=True).score(
translations, [self.data["tgt"]]
)
latency_score = LatencyScorer().score(
[
{"src_len": src_len, "delays": delay}
for src_len, delay in zip(self.src_lengths(), delays)
],
start_from_zero=False,
)
scores = {
"BLEU": bleu_score[0],
"TER": ter_score[0],
"METEOR": meteor_score[0],
"DAL": latency_score["differentiable_average_lagging"],
"AL": latency_score["average_lagging"],
"AP": latency_score["average_proportion"],
}
if self.output_files is not None:
try:
os.makedirs(self.output_dir, exist_ok=True)
self.write_results_to_file(translations, delays, scores)
except BaseException as be:
print(f"Failed to write results to {self.output_dir}.")
print(be)
print("Skip writing predictions")
return scores
def write_results_to_file(self, translations, delays, scores):
if self.output_files["text"] is not None:
with open(self.output_files["text"], "w") as f:
for line in translations:
f.write(line + "\n")
if self.output_files["delay"] is not None:
with open(self.output_files["delay"], "w") as f:
for i, delay in enumerate(delays):
f.write(
json.dumps({"src_len": self.src_lengths()[i], "delays": delay})
+ "\n"
)
with open(self.output_files["scores"], "w") as f:
for key, value in scores.items():
f.write(f"{key}, {value}\n")
@classmethod
def _load_text_file(cls, file, split=False):
with open(file) as f:
if split:
return [r.strip().split() for r in f]
else:
return [r.strip() for r in f]
@classmethod
def _load_text_from_json(cls, file):
list_to_return = []
with open(file) as f:
content = json.load(f)
for item in content["utts"].values():
list_to_return.append(item["output"]["text"].strip())
return list_to_return
@classmethod
def _load_wav_info_from_json(cls, file):
list_to_return = []
with open(file) as f:
content = json.load(f)
for item in content["utts"].values():
list_to_return.append(
{
"path": item["input"]["path"].strip(),
"length": item["input"]["length_ms"],
}
)
return list_to_return
@classmethod
def _load_wav_info_from_list(cls, file):
list_to_return = []
with open(file) as f:
for line in f:
list_to_return.append(
{
"path": line.strip(),
}
)
return list_to_return
def __len__(self):
return len(self.data["tgt"])
|
COCO-LM/fairseq/examples/simultaneous_translation/eval/scorers/scorer.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/scorers/scorer.py",
"repo_id": "COCO-LM",
"token_count": 2964
}
| 172 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.speech_recognition.data.replabels import pack_replabels
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
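                # LinSeg initialization: spread the target labels evenly across all T output frames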
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
|
COCO-LM/fairseq/examples/speech_recognition/criterions/ASG_loss.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/criterions/ASG_loss.py",
"repo_id": "COCO-LM",
"token_count": 2794
}
| 173 |
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.models." + model_name)
|
COCO-LM/fairseq/examples/speech_recognition/models/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/models/__init__.py",
"repo_id": "COCO-LM",
"token_count": 107
}
| 174 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang"]
class mTEDx(Dataset):
"""
Create a Dataset for Multilingual TEDx.
Each item is a tuple of the form: waveform, sample_rate, source utterance,
target utterance, speaker_id, utterance_id
"""
SPLITS = ["train", "valid", "test"]
LANGPAIRS = ["es-es", "fr-fr", "pt-pt", "it-it", "ru-ru", "el-el", "ar-ar", "de-de",
"es-en", "es-fr", "es-pt", "es-it", "fr-en", "fr-es", "fr-pt",
"pt-en", "pt-es", "it-en", "it-es", "ru-en", "el-en"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGPAIRS
_root = Path(root) / f"{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the Multilingual TEDx YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
src, tgt = lang.split("-")
for _lang in [src, tgt]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_filename = wav_filename.replace(".wav", ".flac")
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment[src],
segment[tgt],
segment["speaker_id"],
tgt,
_id,
)
)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute()
for lang in mTEDx.LANGPAIRS:
cur_root = root / f"{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
feature_root = cur_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in mTEDx.SPLITS:
print(f"Fetching split {split}...")
dataset = mTEDx(root.as_posix(), lang, split)
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = cur_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in mTEDx.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = mTEDx(args.data_root, lang, split)
for wav, sr, src_utt, tgt_utt, speaker_id, tgt_lang, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(wav.size(1) / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
manifest["speaker"].append(speaker_id)
manifest["tgt_lang"].append(tgt_lang)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(feature_root)
def process_joint(args):
cur_root = Path(args.data_root)
assert all((cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS), \
"do not have downloaded data available for all languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in mTEDx.LANGPAIRS:
tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.joint:
# Add tgt_lang tags to dict
special_symbols = list({f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS})
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.joint),
)
# Make symbolic links to manifests
for lang in mTEDx.LANGPAIRS:
for split in mTEDx.SPLITS:
src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
parser.add_argument("--joint", action="store_true", help="")
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/speech_to_text/prep_mtedx_data.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/prep_mtedx_data.py",
"repo_id": "COCO-LM",
"token_count": 4504
}
| 175 |
# Truncated Backpropagation Through Time (BPTT)
Truncated BPTT is a useful technique for training language models on very long
sequences. Typically a long sequence is split into chunks and a language model
is trained over the chunks sequentially. The LM may condition on previous
chunks, but gradients only flow through the current chunk. This technique was
the basis for the paper: [Transformer-XL: Attentive Language Models Beyond a
Fixed-Length Context](https://arxiv.org/abs/1901.02860), which achieved
state-of-the-art language modeling results at the time of publication.
It is slightly tricky to implement Truncated BPTT efficiently in fairseq, since
we need to iterate over the data sequentially and disable any batch shuffling
logic. The code provided in this example illustrates how to implement Truncated
BPTT in fairseq by overriding ``FairseqTask::get_batch_iterator`` to iterate
over the data sequentially. Crucially, this example supports batching and
multi-GPU (data parallel) training.
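For intuition, a minimal (hypothetical) sketch of such an override is shown below; the task name, dataset handling, and the omitted sharding/epoch arguments are simplifications, and the real implementation lives in `examples/truncated_bptt/`:
```python
# Sketch only: assumes `dataset` already yields fixed-size chunks in document order.
from fairseq.data import iterators
from fairseq.tasks import LegacyFairseqTask, register_task


@register_task("my_sequential_lm")  # hypothetical name, not the shipped task
class MySequentialLMTask(LegacyFairseqTask):
    def get_batch_iterator(self, dataset, **kwargs):
        # Serve batches strictly in order, with no shuffling between epochs,
        # so hidden states / memory can be carried over from chunk to chunk.
        return iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=[[i] for i in range(len(dataset))],
        )
```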
##### 0. Setup
First, see the general [language modeling README](README.md) for instructions on
preprocessing the WikiText-103 data.
##### 1. Train a Transformer-XL model on WikiText-103
We will train a 16-layer Transformer-XL model following the [hyperparameters
used in the original
paper](https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/run_wt103_base.sh).
The following command assumes 4 GPUs, so that the total batch size is 60
sequences (15 x 4). Training should take ~24 hours on 4 V100 GPUs:
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train \
--user-dir examples/truncated_bptt \
data-bin/wikitext-103/ \
--task truncated_bptt_lm --tokens-per-sample 150 \
--batch-size 15 --max-update 200000 \
--arch transformer_xl --n-layer 16 --d-model 410 --n-head 10 \
--d-head 41 --d-inner 2100 --dropout 0.1 --dropatt 0.0 --mem-len 150 \
--optimizer adam --clip-norm 0.25 \
--lr-scheduler cosine --warmup-updates 0 --min-lr 0.0 --lr 0.00025 \
--log-format json --log-interval 25 \
--fp16
```
If training on a single GPU, set `--update-freq=4` to accumulate 4x gradients
and simulate training on 4 GPUs.
##### 2. Evaluate
```bash
fairseq-eval-lm data-bin/wikitext-103/ \
--path checkpoints/checkpoint_best.pt \
--user-dir examples/truncated_bptt/ \
--task truncated_bptt_lm \
--batch-size 1 --required-batch-size-multiple 1 \
--model-overrides '{"mem_len":640,"clamp_len":400,"same_length":True}' \
--tokens-per-sample 64
# ... | INFO | fairseq_cli.eval_lm | num. model params: 151123537
# ... | INFO | fairseq_cli.eval_lm | Evaluated 245569 tokens in 83.1s (2956.82 tokens/s)
# ... | INFO | fairseq_cli.eval_lm | Loss (base 2): 4.5668, Perplexity: 23.70
# Compare to 24.0 test perplexity from the paper
```
*Note:* During training the model saw 150 tokens of context
(``--tokens-per-sample=150``) and 150 extra memory tokens (``--mem-len=150``).
During evaluation we measure perplexity on sequences of 64 tokens
(``--tokens-per-sample=64``) and increase the memory length
(``--model-overrides='{"mem_len":640}'``). These settings match the evaluation
settings from [the original
paper](https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/run_wt103_base.sh).
|
COCO-LM/fairseq/examples/truncated_bptt/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/truncated_bptt/README.md",
"repo_id": "COCO-LM",
"token_count": 1071
}
| 176 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class DummyLMConfig(FairseqDataclass):
dict_size: int = 49996
dataset_size: int = 100000
tokens_per_sample: int = field(
default=512, metadata={"help": "max sequence length"}
)
add_bos_token: bool = False
batch_size: Optional[int] = II("dataset.batch_size")
max_tokens: Optional[int] = II("dataset.max_tokens")
max_target_positions: int = II("task.tokens_per_sample")
@register_task("dummy_lm", dataclass=DummyLMConfig)
class DummyLMTask(FairseqTask):
def __init__(self, cfg: DummyLMConfig):
super().__init__(cfg)
# load dictionary
self.dictionary = Dictionary()
for i in range(cfg.dict_size):
self.dictionary.add_symbol("word{}".format(i))
self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8
logger.info("dictionary: {} types".format(len(self.dictionary)))
seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.cfg.batch_size is not None:
bsz = self.cfg.batch_size
else:
bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.cfg.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.cfg.tokens_per_sample,
},
num_items=self.cfg.dataset_size,
item_size=self.cfg.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
|
COCO-LM/fairseq/fairseq/benchmark/dummy_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/benchmark/dummy_lm.py",
"repo_id": "COCO-LM",
"token_count": 1584
}
| 177 |
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
@dataclass
class CtcCriterionConfig(FairseqDataclass):
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(
default="letter",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
wer_kenlm_model: Optional[str] = field(
default=None,
metadata={
"help": "if this is provided, use kenlm to compute wer (along with other wer_* args)"
},
)
wer_lexicon: Optional[str] = field(
default=None,
metadata={"help": "lexicon to use with wer_kenlm_model"},
)
wer_lm_weight: float = field(
default=2.0,
metadata={"help": "lm weight to use with wer_kenlm_model"},
)
wer_word_score: float = field(
default=-1.0,
metadata={"help": "lm word score to use with wer_kenlm_model"},
)
wer_args: Optional[str] = field(
default=None,
metadata={
"help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)"
},
)
@register_criterion("ctc", dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = task.target_dictionary.index(task.blank_symbol) if hasattr(task, 'blank_symbol') else 0
self.pad_idx = task.target_dictionary.pad()
self.eos_idx = task.target_dictionary.eos()
self.post_process = cfg.post_process
if cfg.wer_args is not None:
(
cfg.wer_kenlm_model,
cfg.wer_lexicon,
cfg.wer_lm_weight,
cfg.wer_word_score,
) = eval(cfg.wer_args)
if cfg.wer_kenlm_model is not None:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = "ctc"
dec_args.kenlm_model = cfg.wer_kenlm_model
dec_args.lexicon = cfg.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = cfg.wer_lm_weight
dec_args.word_score = cfg.wer_word_score
dec_args.unk_weight = -math.inf
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
lprobs = model.get_normalized_probs(
net_output, log_probs=True
).contiguous() # (T, B, C) from the encoder
if "src_lengths" in sample["net_input"]:
input_lengths = sample["net_input"]["src_lengths"]
else:
non_padding_mask = ~net_output["padding_mask"]
input_lengths = non_padding_mask.long().sum(-1)
pad_mask = (sample["target"] != self.pad_idx) & (
sample["target"] != self.eos_idx
)
targets_flat = sample["target"].masked_select(pad_mask)
if "target_lengths" in sample:
target_lengths = sample["target_lengths"]
else:
target_lengths = pad_mask.sum(-1)
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": ntokens,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
if not model.training:
import editdistance
with torch.no_grad():
lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for lp, t, inp_l in zip(
lprobs_t,
sample["target_label"]
if "target_label" in sample
else sample["target"],
input_lengths,
):
lp = lp[:inp_l].unsqueeze(0)
decoded = None
if self.w2l_decoder is not None:
decoded = self.w2l_decoder.decode(lp)
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
p = (t != self.task.target_dictionary.pad()) & (
t != self.task.target_dictionary.eos()
)
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
w_errs += editdistance.eval(pred_words, targ_words)
wv_errs += editdistance.eval(pred_words_raw, targ_words)
else:
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/fairseq/criterions/ctc.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/ctc.py",
"repo_id": "COCO-LM",
"token_count": 5605
}
| 178 |
# cython: language_level=3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
cimport cython
cimport numpy as np
from libc.stdint cimport int32_t, int64_t
from libcpp cimport bool as bool_t
ctypedef int64_t DTYPE_t
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef list batch_by_size_vec(
np.ndarray[int64_t, ndim=1] indices,
np.ndarray[int64_t, ndim=1] num_tokens_vec,
int64_t max_tokens,
int64_t max_sentences,
int32_t bsz_mult,
):
if indices.shape[0] == 0:
return []
assert max_tokens <= 0 or np.max(num_tokens_vec) <= max_tokens, (
f"Sentences lengths should not exceed max_tokens={max_tokens}"
)
cdef int32_t indices_len = indices.shape[0]
cdef np.ndarray[int32_t, ndim=1] batches_ends = \
np.zeros(indices_len, dtype=np.int32)
cdef int32_t[:] batches_ends_view = batches_ends
cdef int64_t[:] num_tokens_view = num_tokens_vec
cdef int32_t pos = 0
cdef int32_t new_batch_end = 0
cdef int64_t new_batch_max_tokens = 0
cdef int32_t new_batch_sentences = 0
cdef int64_t new_batch_num_tokens = 0
cdef bool_t overflow = False
cdef bool_t size_matches_with_bsz_mult = False
cdef int32_t batches_count = 0
cdef int32_t batch_start = 0
cdef int64_t tail_max_tokens = 0
cdef int64_t batch_max_tokens = 0
for pos in range(indices_len):
# At every pos we keep stats about the last complete batch [batch_start:batch_end),
# and tail [batch_end:pos].
# 1) Every time when (batch + tail) forms a valid batch
# (according to max_tokens, max_sentences and bsz_mult) we append tail to batch.
# 2) When (batch+tail) violates max_tokens or max_sentences constraints
# we finalize running batch, and tail becomes a new batch.
# 3) There is a corner case when tail also violates constraints.
# In that situation [batch_end:pos-1] (tail without the current pos)
# gets added to the finalized batches, while [pos:pos] becomes a new tail.
#
# Important: For the sake of performance try to avoid using function calls within this loop.
tail_max_tokens = tail_max_tokens \
if tail_max_tokens > num_tokens_view[pos] \
else num_tokens_view[pos]
new_batch_end = pos + 1
new_batch_max_tokens = batch_max_tokens \
if batch_max_tokens > tail_max_tokens \
else tail_max_tokens
new_batch_sentences = new_batch_end - batch_start
new_batch_num_tokens = new_batch_sentences * new_batch_max_tokens
overflow = (new_batch_sentences > max_sentences > 0 or
new_batch_num_tokens > max_tokens > 0)
size_matches_with_bsz_mult = (new_batch_sentences < bsz_mult or
new_batch_sentences % bsz_mult == 0)
if overflow:
tail_num_tokens = tail_max_tokens * \
(new_batch_end - batches_ends_view[batches_count])
tail_overflow = tail_num_tokens > max_tokens > 0
# In case of a tail overflow finalize two batches
if tail_overflow:
batches_count += 1
batches_ends_view[batches_count] = pos
tail_max_tokens = num_tokens_view[pos]
batch_start = batches_ends_view[batches_count]
batches_count += 1
new_batch_max_tokens = tail_max_tokens
if overflow or size_matches_with_bsz_mult:
batches_ends_view[batches_count] = new_batch_end
batch_max_tokens = new_batch_max_tokens
tail_max_tokens = 0
if batches_ends_view[batches_count] != indices_len:
batches_count += 1
# Memory and time-efficient split
return np.split(indices, batches_ends[:batches_count])
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef list batch_by_size_fn(
np.ndarray[DTYPE_t, ndim=1] indices,
num_tokens_fn,
int64_t max_tokens,
int64_t max_sentences,
int32_t bsz_mult,
):
cdef int32_t indices_len = indices.shape[0]
cdef np.ndarray[int64_t, ndim=1] num_tokens_vec = np.zeros(indices_len,
dtype=np.int64)
cdef DTYPE_t[:] indices_view = indices
cdef DTYPE_t[:] num_tokens_vec_view = num_tokens_vec
cdef int64_t pos
for pos in range(indices_len):
num_tokens_vec[pos] = num_tokens_fn(indices_view[pos])
return batch_by_size_vec(indices, num_tokens_vec, max_tokens,
max_sentences, bsz_mult,)
cdef _find_valid_shape(
DTYPE_t[:, :] shapes_view,
int64_t num_sentences,
int64_t num_tokens,
):
"""Return index of first valid shape of -1 if none is found."""
for i in range(shapes_view.shape[0]):
if num_sentences <= shapes_view[i][0] and num_tokens <= shapes_view[i][1]:
return i
return -1
@cython.cdivision(True)
cpdef list batch_fixed_shapes_fast(
np.ndarray[DTYPE_t, ndim=1] indices,
num_tokens_fn,
np.ndarray[DTYPE_t, ndim=2] fixed_shapes_sorted,
):
cdef int64_t sample_len = 0
cdef list sample_lens = []
cdef list batch = []
cdef list batches = []
cdef int64_t mod_len
cdef int64_t i
cdef int64_t idx
cdef int64_t num_tokens
cdef DTYPE_t[:] indices_view = indices
cdef DTYPE_t[:, :] shapes_view = fixed_shapes_sorted
for i in range(len(indices_view)):
idx = indices_view[i]
num_tokens = num_tokens_fn(idx)
sample_lens.append(num_tokens)
sample_len = max(sample_len, num_tokens)
shape_idx = _find_valid_shape(shapes_view, len(batch) + 1, sample_len)
if shape_idx == -1:
batches.append(batch)
batch = []
sample_lens = []
sample_len = 0
shapes_view = fixed_shapes_sorted
elif shape_idx > 0:
# small optimization for the next call to _find_valid_shape
shapes_view = shapes_view[shape_idx:]
batch.append(idx)
if len(batch) > 0:
batches.append(batch)
return batches
|
COCO-LM/fairseq/fairseq/data/data_utils_fast.pyx/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/data_utils_fast.pyx",
"repo_id": "COCO-LM",
"token_count": 3041
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass
@register_tokenizer("space", dataclass=FairseqDataclass)
class SpaceTokenizer(object):
def __init__(self, *unused):
self.space_tok = re.compile(r"\s+")
def encode(self, x: str) -> str:
return self.space_tok.sub(" ", x)
def decode(self, x: str) -> str:
return x
|
COCO-LM/fairseq/fairseq/data/encoders/space_tokenizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/space_tokenizer.py",
"repo_id": "COCO-LM",
"token_count": 210
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from fairseq.data import Dictionary, data_utils
from . import BaseWrapperDataset, LRUCacheDataset
class MaskTokensDataset(BaseWrapperDataset):
"""
A wrapper Dataset for masked language modeling.
Input items are masked according to the specified masking probability.
Args:
dataset: Dataset to wrap.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of pad token in vocab
mask_idx: Id of mask token in vocab
return_masked_tokens: controls whether to return the non-masked tokens
(the default) or to return a tensor with the original masked token
IDs (and *pad_idx* elsewhere). The latter is useful as targets for
masked LM training.
seed: Seed for random number generator for reproducibility.
mask_prob: probability of replacing a token with *mask_idx*.
leave_unmasked_prob: probability that a masked token is unmasked.
random_token_prob: probability of replacing a masked token with a
random token from the vocabulary.
freq_weighted_replacement: sample random replacement words based on
word frequencies in the vocab.
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
bpe: BPE to use for whole-word masking.
mask_multiple_length : repeat each mask index multiple times. Default
value is 1.
mask_stdev : standard deviation of masks distribution in case of
multiple masking. Default value is 0.
"""
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
dataset = LRUCacheDataset(dataset)
return (
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)),
LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)),
)
def __init__(
self,
dataset: torch.utils.data.Dataset,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
return_masked_tokens: bool = False,
seed: int = 1,
mask_prob: float = 0.15,
leave_unmasked_prob: float = 0.1,
random_token_prob: float = 0.1,
freq_weighted_replacement: bool = False,
mask_whole_words: torch.Tensor = None,
mask_multiple_length: int = 1,
mask_stdev: float = 0.0,
):
assert 0.0 < mask_prob < 1.0
assert 0.0 <= random_token_prob <= 1.0
assert 0.0 <= leave_unmasked_prob <= 1.0
assert random_token_prob + leave_unmasked_prob <= 1.0
assert mask_multiple_length >= 1
assert mask_stdev >= 0.0
self.dataset = dataset
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.return_masked_tokens = return_masked_tokens
self.seed = seed
self.mask_prob = mask_prob
self.leave_unmasked_prob = leave_unmasked_prob
self.random_token_prob = random_token_prob
self.mask_whole_words = mask_whole_words
self.mask_multiple_length = mask_multiple_length
self.mask_stdev = mask_stdev
if random_token_prob > 0.0:
if freq_weighted_replacement:
weights = np.array(self.vocab.count)
else:
weights = np.ones(len(self.vocab))
weights[: self.vocab.nspecial] = 0
self.weights = weights / weights.sum()
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the noise changes, not item sizes
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.seed, self.epoch, index)
@lru_cache(maxsize=8)
def __getitem_cached__(self, seed: int, epoch: int, index: int):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
sz = len(item)
assert (
self.mask_idx not in item
), "Dataset contains mask_idx (={}), this is not expected!".format(
self.mask_idx,
)
if self.mask_whole_words is not None:
word_begins_mask = self.mask_whole_words.gather(0, item)
word_begins_idx = word_begins_mask.nonzero().view(-1)
sz = len(word_begins_idx)
words = np.split(word_begins_mask, word_begins_idx)[1:]
assert len(words) == sz
word_lens = list(map(len, words))
# decide elements to mask
mask = np.full(sz, False)
num_mask = int(
# add a random number for probabilistic rounding
self.mask_prob * sz / float(self.mask_multiple_length)
+ np.random.rand()
)
# multiple masking as described in the vq-wav2vec paper (https://arxiv.org/abs/1910.05453)
mask_idc = np.random.choice(sz, num_mask, replace=False)
if self.mask_stdev > 0.0:
lengths = np.random.normal(
self.mask_multiple_length, self.mask_stdev, size=num_mask
)
lengths = [max(0, int(round(x))) for x in lengths]
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
],
dtype=np.int64,
)
else:
mask_idc = np.concatenate(
[mask_idc + i for i in range(self.mask_multiple_length)]
)
mask_idc = mask_idc[mask_idc < len(mask)]
try:
mask[mask_idc] = True
except: # something wrong
print(
"Assigning mask indexes {} to mask {} failed!".format(
mask_idc, mask
)
)
raise
if self.return_masked_tokens:
# exit early if we're just returning the masked tokens
# (i.e., the targets for masked LM training)
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.full(len(mask), self.pad_idx)
new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1]
return torch.from_numpy(new_item)
# decide unmasking and random replacement
rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob
if rand_or_unmask_prob > 0.0:
rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob)
if self.random_token_prob == 0.0:
unmask = rand_or_unmask
rand_mask = None
elif self.leave_unmasked_prob == 0.0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob
decision = np.random.rand(sz) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
else:
unmask = rand_mask = None
if unmask is not None:
mask = mask ^ unmask
if self.mask_whole_words is not None:
mask = np.repeat(mask, word_lens)
new_item = np.copy(item)
new_item[mask] = self.mask_idx
if rand_mask is not None:
num_rand = rand_mask.sum()
if num_rand > 0:
if self.mask_whole_words is not None:
rand_mask = np.repeat(rand_mask, word_lens)
num_rand = rand_mask.sum()
new_item[rand_mask] = np.random.choice(
len(self.vocab),
num_rand,
p=self.weights,
)
return torch.from_numpy(new_item)
|
COCO-LM/fairseq/fairseq/data/mask_tokens_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/mask_tokens_dataset.py",
"repo_id": "COCO-LM",
"token_count": 4426
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import json
import tempfile
import hashlib
import os
from typing import Hashable
try:
import pyarrow.plasma as plasma
PYARROW_AVAILABLE = True
except ImportError:
plasma = None
PYARROW_AVAILABLE = False
class PlasmaArray:
"""
Wrapper around numpy arrays that automatically moves the data to shared
memory upon serialization. This is particularly helpful when passing numpy
arrays through multiprocessing, so that data is not unnecessarily
duplicated or pickled.
"""
def __init__(self, array):
super().__init__()
self.array = array
self.disable = array.nbytes < 134217728 # disable for arrays <128MB
self.object_id = None
self.path = None
# variables with underscores shouldn't be pickled
self._client = None
self._server = None
self._server_tmp = None
self._plasma = None
@property
def plasma(self):
if self._plasma is None and not self.disable:
self._plasma = plasma
return self._plasma
def start_server(self):
if self.plasma is None or self._server is not None:
return
assert self.object_id is None
assert self.path is None
self._server_tmp = tempfile.NamedTemporaryFile()
self.path = self._server_tmp.name
devnull = open(os.devnull, 'w')
self._server = subprocess.Popen(
["plasma_store", "-m", str(int(1.02 * self.array.nbytes)), "-s", self.path],
stdout=devnull, stderr=devnull
)
@property
def client(self):
if self._client is None:
assert self.path is not None
self._client = self.plasma.connect(self.path, num_retries=200)
return self._client
def __getstate__(self):
"""Called on pickle load"""
if self.plasma is None:
return self.__dict__
if self.object_id is None:
self.start_server()
self.object_id = self.client.put(self.array)
state = self.__dict__.copy()
del state["array"]
state["_client"] = None
state["_server"] = None
state["_server_tmp"] = None
state["_plasma"] = None
return state
def __setstate__(self, state):
"""Called on pickle save"""
self.__dict__.update(state)
if self.plasma is None:
return
self.array = self.client.get(self.object_id)
def __del__(self):
if self._server is not None:
self._server.kill()
self._server = None
self._server_tmp.close()
self._server_tmp = None
DEFAULT_PLASMA_PATH = "/tmp/plasma"
class PlasmaView:
"""Interface to write and read from shared memory. Whereas PlasmaArray writes to plasma on serialization,
PlasmaView writes to shared memory on instantiation."""
def __init__(self, array, split_path: str, hash_data: Hashable, plasma_path=None):
"""
Args:
array: numpy array to store. This can be read with ``PlasmaView().array``
split_path: the path whence the data was read, used for hashing
hash_data: other metadata about the array that can be used to create a unique key.
as of writing, the 3 callers in ``TokenBlockDataset`` use::
hash_data = ((block_size, document_sep_len, str(break_mode), len(dataset)), 0|1|2)
"""
assert PYARROW_AVAILABLE
assert split_path is not None
if plasma_path is None:
plasma_path = DEFAULT_PLASMA_PATH
self.path = plasma_path
self.split_path = split_path
self._client = None # Initialize lazily for pickle. plasma clients should not be deep copied or serialized.
self._n = None
self.object_id = self.get_object_id(self.split_path, hash_data)
try:
self.client.put(array, object_id=self.object_id)
except plasma.PlasmaObjectExists:
pass
@property
def client(self):
if self._client is None:
self._client = plasma.connect(self.path, num_retries=200)
return self._client
@property
def array(self):
"""Fetch a read only view of an np.array, stored in plasma."""
ret = self.client.get(self.object_id)
return ret
@staticmethod
def get_object_id(split_path: str, hash_data: Hashable):
"""Returns plasma.ObjectID from hashing split_path and object_num."""
hash = hashlib.blake2b(bytes(split_path, "utf-8"), digest_size=20)
harg = json.dumps(hash_data).encode("utf-8")
hash.update(harg)
return plasma.ObjectID(hash.digest())
def __getstate__(self):
"""Called on pickle save"""
self.disconnect()
state = self.__dict__.copy()
assert state["_client"] is None
assert "object_id" in state
return state
def __setstate__(self, state):
"""Called on pickle load"""
self.__dict__.update(state)
def __del__(self):
self.disconnect()
def disconnect(self):
if self._client is not None:
self._client.disconnect()
self._client = None
def __len__(self):
"""Save reads by caching len"""
if self._n is None:
self._n = len(self.array)
return self._n
GB100 = (1024 ** 3) * 100
class PlasmaStore:
def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100):
self.server = self.start(path, nbytes)
def __del__(self):
self.server.kill()
@staticmethod
def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen:
if not PYARROW_AVAILABLE:
raise ImportError("please run pip install pyarrow to use --use_plasma_view")
# best practice is to allocate more space than we need. The limitation seems to be the size of /dev/shm
_server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path])
plasma.connect(path, num_retries=200) # If we can't connect we fail immediately
return _server
|
COCO-LM/fairseq/fairseq/data/plasma_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/plasma_utils.py",
"repo_id": "COCO-LM",
"token_count": 2659
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
from . import BaseWrapperDataset
logger = logging.getLogger(__name__)
class SubsampleDataset(BaseWrapperDataset):
"""Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples
Args:
dataset (~torch.utils.data.Dataset): dataset to subsample
size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive)
"""
def __init__(self, dataset, size_ratio, shuffle=False):
super().__init__(dataset)
assert size_ratio < 1
self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
self.indices = np.random.choice(
list(range(len(self.dataset))), self.actual_size, replace=False
)
self.shuffle = shuffle
logger.info(
"subsampled dataset from {} to {} (ratio={})".format(
len(self.dataset), self.actual_size, size_ratio
)
)
def __getitem__(self, index):
return self.dataset[self.indices[index]]
def __len__(self):
return self.actual_size
def collater(self, samples):
return self.dataset.collater(samples)
@property
def sizes(self):
return self.dataset.sizes[self.indices]
@property
def name(self):
return self.dataset.name
def num_tokens(self, index):
return self.dataset.num_tokens(self.indices[index])
def size(self, index):
return self.dataset.size(self.indices[index])
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
def prefetch(self, indices):
self.dataset.prefetch(self.indices[indices])
|
COCO-LM/fairseq/fairseq/data/subsample_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/subsample_dataset.py",
"repo_id": "COCO-LM",
"token_count": 890
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from fairseq.distributed import utils
class TPUDistributedDataParallel(nn.Module):
def __init__(self, module, process_group):
super().__init__()
self.module = module
self.process_group = process_group
self.world_size = utils.get_world_size(self.process_group)
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def all_reduce_grads(self):
gradients = []
for p in self.parameters():
if not p.requires_grad:
continue
if p.grad is None:
p.grad = torch.zeros_like(p)
if p.grad.requires_grad:
raise RuntimeError(
"TPUDistributedDataParallel only works with gradients that don't "
"require grad"
)
gradients.append(p.grad)
import torch_xla.core.xla_model as xm
xm.all_reduce(
'sum',
gradients,
scale=1. / self.world_size,
groups=self.process_group[1],
)
|
COCO-LM/fairseq/fairseq/distributed/tpu_distributed_data_parallel.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/distributed/tpu_distributed_data_parallel.py",
"repo_id": "COCO-LM",
"token_count": 598
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .fairseq_encoder import FairseqEncoder
class CompositeEncoder(FairseqEncoder):
"""
A wrapper around a dictionary of :class:`FairseqEncoder` objects.
We run forward on each encoder and return a dictionary of outputs. The first
encoder's dictionary is used for initialization.
Args:
encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
"""
def __init__(self, encoders):
super().__init__(next(iter(encoders.values())).dictionary)
self.encoders = encoders
for key in self.encoders:
self.add_module(key, self.encoders[key])
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
the outputs from each Encoder
"""
encoder_out = {}
for key in self.encoders:
encoder_out[key] = self.encoders[key](src_tokens, src_lengths)
return encoder_out
def reorder_encoder_out(self, encoder_out, new_order):
"""Reorder encoder output according to new_order."""
for key in self.encoders:
encoder_out[key] = self.encoders[key].reorder_encoder_out(
encoder_out[key], new_order
)
return encoder_out
def max_positions(self):
return min(self.encoders[key].max_positions() for key in self.encoders)
def upgrade_state_dict(self, state_dict):
for key in self.encoders:
self.encoders[key].upgrade_state_dict(state_dict)
return state_dict
|
COCO-LM/fairseq/fairseq/models/composite_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/composite_encoder.py",
"repo_id": "COCO-LM",
"token_count": 827
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import torch
from torch import Tensor
@torch.jit.script
def script_skip_tensor_list(x: List[Tensor], mask):
res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x]
outputs = []
for i, t in enumerate(res):
if t.numel() != 0:
outputs.append(t)
else:
outputs.append(x[i])
return outputs
@torch.jit.script
def script_skip_tensor(x: Tensor, mask):
# None case
if x.size(0) == 0:
return x
res = x[mask] if x.size(0) == mask.size(0) else x[:, mask]
if res.numel() == 0:
return x
else:
return res
@torch.jit.script
def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
"""
Expand 2D/3D tensor on dim=1
"""
if x is None:
return None
assert x.dim() == 2 or x.dim() == 3
assert trg_dim >= x.size(1), (trg_dim, x.size())
if trg_dim == x.size(1):
return x
dims = [x.size(0), trg_dim - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1)
return x
@torch.jit.script
def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
return x if x is not None else y
@torch.jit.script
def fill_tensors(
x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int
) -> Optional[Tensor]:
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None or x.size()[0] == 0 or y is None:
return x
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
if n_selected == 0:
return x
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = torch.tensor(padding_idx).type_as(x)
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
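# Illustrative behaviour of fill_tensors (hypothetical shapes): with x of shape
# (4, 5), a mask selecting 2 of the 4 rows, and y of shape (2, 7), x is first
# expanded to width 7 (new columns filled with padding_idx) and y is then written
# into the two masked rows; the un-masked rows keep their original values.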
|
COCO-LM/fairseq/fairseq/models/model_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/model_utils.py",
"repo_id": "COCO-LM",
"token_count": 1108
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
CamemBERT: a Tasty French Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("camembert")
class CamembertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"camembert": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert.v0": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-base": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-large": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz",
"camembert-base-ccnet": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz",
"camembert-base-ccnet-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz",
"camembert-base-wikipedia-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz",
"camembert-base-oscar-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
|
COCO-LM/fairseq/fairseq/models/roberta/model_camembert.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/roberta/model_camembert.py",
"repo_id": "COCO-LM",
"token_count": 880
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
|
COCO-LM/fairseq/fairseq/modules/learned_positional_embedding.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/learned_positional_embedding.py",
"repo_id": "COCO-LM",
"token_count": 967
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
"""
Adds a vector of scalars, used in self-attention mechanism to allow
the model to optionally attend to this vector instead of the past
"""
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, size[dim] - 1).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
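# Illustrative shapes (hypothetical tensor): for attention weights of shape
# (batch, tgt_len, src_len), scalar_bias(weights, dim=2) returns a tensor of shape
# (batch, tgt_len, src_len + 1) whose first column along dim=2 holds bias_init and
# whose remaining columns hold the original weights.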
|
COCO-LM/fairseq/fairseq/modules/scalar_bias.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/scalar_bias.py",
"repo_id": "COCO-LM",
"token_count": 333
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad")
class Adagrad(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return False
|
COCO-LM/fairseq/fairseq/optim/adagrad.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/adagrad.py",
"repo_id": "COCO-LM",
"token_count": 503
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import LegacyFairseqLRScheduler, register_lr_scheduler
import logging
import ast
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
@register_lr_scheduler("manual")
class ManualSchedule(LegacyFairseqLRScheduler):
"""Decay the LR on a manual schedule."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
self.epoch2lr = self.parse_manuallr_args(args.epoch2lr)
self.update2lr = self.parse_manuallr_args(args.update2lr)
logger.info("@@@ ManualSchedule epoch2lr={}".format(self.epoch2lr))
logger.info("@@@ ManualSchedule update2lr={}".format(self.update2lr))
if 1 in self.epoch2lr:
self.lr = self.epoch2lr[1]
elif 1 in self.update2lr:
self.lr = self.update2lr[1]
else:
self.lr = args.lr[0]
        self.optimizer.set_lr(self.lr)  # set the initial LR before the first epoch
def parse_manuallr_args(self, lr_args_str):
lr_dict = ast.literal_eval(lr_args_str.replace(' ', ''))
if not isinstance(lr_dict, dict):
raise ValueError("epoch2lr/update2lr must be abel to evaluated to a dict")
lr_args = {}
logger.info("@@@ after parsing input dictionary lr_dict = {}".format(lr_dict))
for key, val in lr_dict.items():
if "," in key:
for k in key.split(","):
lr_args[int(k)] = float(val)
elif "-" in key:
s = int(key.split("-")[0])
e = int(key.split("-")[1])
for k in range(s, e + 1, 1):
lr_args[k] = float(val)
else:
lr_args[int(key)] = float(val)
return lr_args
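    # Illustrative parse (hypothetical argument): --epoch2lr "{'1,2':0.01,'3-5':0.001}"
    # yields {1: 0.01, 2: 0.01, 3: 0.001, 4: 0.001, 5: 0.001}, i.e. comma-separated
    # keys enumerate epochs and dash-separated keys denote inclusive ranges.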
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
# fmt: off
parser.add_argument(
"--epoch2lr",
type=str,
metavar="DICT",
default="{}",
help="a dictionary used to set lr for each epoch manually",
)
parser.add_argument(
"--update2lr",
type=str,
metavar="DICT",
default="{}",
help="a dictionary used to set lr for each update manually",
)
# fmt: on
def state_dict(self):
return {"lr": self.lr}
def load_state_dict(self, state_dict):
if "lr" in state_dict:
self.lr = state_dict["lr"]
def get_next_lr(self, epoch):
manual_keys = [k for k in self.epoch2lr if k <= epoch]
if manual_keys:
manual_lr = self.epoch2lr[max(manual_keys)]
else:
logger.warning("@@@ epoch={} does not exist in manual lr input. epoch2lr={}...".format(
epoch, list(self.epoch2lr.items())[:min(10, len(self.epoch2lr.keys())-1)]
))
manual_lr = self.optimizer.get_lr()
return manual_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
manual_keys = [k for k in self.update2lr if k <= num_updates]
if manual_keys:
manual_lr = self.update2lr[max(manual_keys)]
else:
logger.warning("epoch={} does not exist in manual lr input update2lr={}...".format(
num_updates, list(self.update2lr.items())[:min(10, len(self.update2lr.keys())-1)]))
manual_lr = self.optimizer.get_lr()
self.optimizer.set_lr(manual_lr)
return self.optimizer.get_lr()
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/manual_lr_scheduler.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/manual_lr_scheduler.py",
"repo_id": "COCO-LM",
"token_count": 1882
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unicodedata
from fairseq.dataclass import ChoiceEnum
class EvaluationTokenizer(object):
"""A generic evaluation-time tokenizer, which leverages built-in tokenizers
in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides
lowercasing, punctuation removal and character tokenization, which are
applied after sacreBLEU tokenization.
Args:
tokenizer_type (str): the type of sacreBLEU tokenizer to apply.
lowercase (bool): lowercase the text.
punctuation_removal (bool): remove punctuation (based on unicode
category) from text.
character_tokenization (bool): tokenize the text to characters.
"""
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
ALL_TOKENIZER_TYPES = ChoiceEnum(["none", "13a", "intl", "zh", "ja-mecab"])
def __init__(
self,
tokenizer_type: str = "13a",
lowercase: bool = False,
punctuation_removal: bool = False,
character_tokenization: bool = False,
):
from sacrebleu.tokenizers import TOKENIZERS
assert tokenizer_type in TOKENIZERS, f"{tokenizer_type}, {TOKENIZERS}"
self.lowercase = lowercase
self.punctuation_removal = punctuation_removal
self.character_tokenization = character_tokenization
self.tokenizer = TOKENIZERS[tokenizer_type]
@classmethod
def remove_punctuation(cls, sent: str):
"""Remove punctuation based on Unicode category."""
return cls.SPACE.join(
t
for t in sent.split(cls.SPACE)
if not all(unicodedata.category(c)[0] == "P" for c in t)
)
def tokenize(self, sent: str):
tokenized = self.tokenizer()(sent)
if self.punctuation_removal:
tokenized = self.remove_punctuation(tokenized)
if self.character_tokenization:
tokenized = self.SPACE.join(
list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE))
)
if self.lowercase:
tokenized = tokenized.lower()
return tokenized
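# --- Hedged usage sketch (editorial addition, not part of the original file) ---
# Shows the typical call pattern; requires sacrebleu to be installed, and the
# sample sentence is made up. With the "13a" tokenizer, punctuation removal and
# lowercasing, "Hello, World!" comes out roughly as "hello world".
if __name__ == "__main__":
    tok = EvaluationTokenizer(
        tokenizer_type="13a",
        lowercase=True,
        punctuation_removal=True,
        character_tokenization=False,
    )
    print(tok.tokenize("Hello, World!"))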
|
COCO-LM/fairseq/fairseq/scoring/tokenizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/scoring/tokenizer.py",
"repo_id": "COCO-LM",
"token_count": 914
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from collections import OrderedDict, defaultdict
from fairseq import utils
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
data_utils,
indexed_dataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from . import register_task
from .multilingual_translation import MultilingualTranslationTask
logger = logging.getLogger(__name__)
def _get_bt_dataset_key(lang_pair):
return "bt:" + lang_pair
def _get_denoising_dataset_key(lang_pair):
return "denoising:" + lang_pair
# ported from UnsupervisedMT
def parse_lambda_config(x):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
split = x.split(",")
if len(split) == 1:
return float(x), None
else:
split = [s.split(os.pathsep) for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(
int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1)
)
return float(split[0][1]), [(int(k), float(v)) for k, v in split]
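# Worked example (editorial addition, not in the original source):
#   parse_lambda_config("3")                  -> (3.0, None)   # constant weight
#   parse_lambda_config("0:0,1000:0,2000:1")  -> (0.0, [(0, 0.0), (1000, 0.0), (2000, 1.0)])
# Each comma-separated entry is "step:weight"; the returned list drives the
# piecewise-linear schedule applied in update_step below. Note the pairs are
# split with os.pathsep, which is ":" on POSIX systems.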
@register_task("semisupervised_translation")
class SemisupervisedTranslationTask(MultilingualTranslationTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
`--target-lang`, instead of `--lang-pairs`.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
        parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG',
                            help='cross-entropy reconstruction coefficient (parallel data). '
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
        parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG',
                            help='cross-entropy reconstruction coefficient (denoising autoencoding). '
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
        parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG',
                            help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data). '
                                 'use fixed weight during training if set to floating point number. '
                                 'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N',
help='beam size used in beam search of online back-translation')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# fmt: on
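        # Hedged example (editorial addition, not in the original source): one
        # plausible combination of these flags is
        #   --lambda-parallel-config 1.0 \
        #   --lambda-otf-bt-config "0:0,50000:1" \
        #   --lambda-denoising-config "0:1,50000:0"
        # where each "step:weight" schedule is consumed by parse_lambda_config above.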
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(
args.lambda_parallel_config
)
self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(
args.lambda_otf_bt_config
)
self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(
args.lambda_denoising_config
)
if self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None:
denoising_lang_pairs = [
"%s-%s" % (tgt, tgt)
for tgt in {lang_pair.split("-")[1] for lang_pair in args.lang_pairs}
]
self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs
self.backtranslate_datasets = {}
self.backtranslators = {}
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = MultilingualTranslationTask.prepare(args, **kwargs)
return cls(args, dicts, training)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def split_exists(split, src, tgt, lang):
if src is not None:
filename = os.path.join(
data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)
)
else:
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, src, tgt)
)
return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)
def load_indexed_dataset(path, dictionary):
return data_utils.load_indexed_dataset(
path, dictionary, self.args.dataset_impl
)
# load parallel datasets
src_datasets, tgt_datasets = {}, {}
if (
self.lambda_parallel > 0.0
or self.lambda_parallel_steps is not None
or not split.startswith("train")
):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if split_exists(split, src, tgt, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, src, tgt)
)
elif split_exists(split, tgt, src, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, tgt, src)
)
else:
continue
src_datasets[lang_pair] = load_indexed_dataset(
prefix + src, self.dicts[src]
)
tgt_datasets[lang_pair] = load_indexed_dataset(
prefix + tgt, self.dicts[tgt]
)
logger.info(
"parallel-{} {} {} examples".format(
data_path, split, len(src_datasets[lang_pair])
)
)
if len(src_datasets) == 0:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
# back translation datasets
backtranslate_datasets = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
raise FileNotFoundError(
"Dataset not found: backtranslation {} ({})".format(
split, data_path
)
)
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
dataset = load_indexed_dataset(filename, self.dicts[tgt])
lang_pair_dataset_tgt = LanguagePairDataset(
dataset,
dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
lang_pair_dataset = LanguagePairDataset(
dataset,
dataset.sizes,
src_dict=self.dicts[src],
tgt=dataset,
tgt_sizes=dataset.sizes,
tgt_dict=self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
backtranslate_datasets[lang_pair] = BacktranslationDataset(
tgt_dataset=self.alter_dataset_langtok(
lang_pair_dataset_tgt,
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_lang=src,
),
backtranslation_fn=self.backtranslators[lang_pair],
src_dict=self.dicts[src],
tgt_dict=self.dicts[tgt],
output_collater=self.alter_dataset_langtok(
lang_pair_dataset=lang_pair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
).collater,
)
logger.info(
"backtranslate-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(backtranslate_datasets[lang_pair]),
)
)
self.backtranslate_datasets[lang_pair] = backtranslate_datasets[
lang_pair
]
# denoising autoencoder
noising_datasets = {}
if (
self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
continue
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt])
tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt])
noising_dataset = NoisingDataset(
tgt_dataset1,
self.dicts[tgt],
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noising_datasets[lang_pair] = self.alter_dataset_langtok(
LanguagePairDataset(
noising_dataset,
tgt_dataset1.sizes,
self.dicts[tgt],
tgt_dataset2,
tgt_dataset2.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
logger.info(
"denoising-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(noising_datasets[lang_pair]),
)
)
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split("-")
src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair]
return self.alter_dataset_langtok(
LanguagePairDataset(
src_dataset,
src_dataset.sizes,
self.dicts[src],
tgt_dataset,
tgt_dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
self.dicts[src].eos(),
src,
self.dicts[tgt].eos(),
tgt,
)
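        # Editorial note (added for clarity): the round-robin dataset below keys
        # parallel pairs by "src-tgt", back-translation sets by "bt:src-tgt" and
        # denoising sets by "denoising:src-tgt" (see the _get_*_dataset_key
        # helpers at the top of this file); at inference time only the single
        # "source_lang-target_lang" pair is exposed via eval_key.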
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict(
[
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in src_datasets.keys()
]
+ [
(_get_bt_dataset_key(lang_pair), dataset)
for lang_pair, dataset in backtranslate_datasets.items()
]
+ [
(_get_denoising_dataset_key(lang_pair), dataset)
for lang_pair, dataset in noising_datasets.items()
]
),
eval_key=None
if self.training
else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError(
"SemisupervisedTranslationTask requires a FairseqMultiModel architecture"
)
# create SequenceGenerator for each model that has backtranslation dependency on it
self.sequence_generators = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and self.training:
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
key = "{}-{}".format(tgt, src)
self.sequence_generators[key] = SequenceGenerator(
[model.models[key]],
tgt_dict=self.dicts[src],
beam_size=args.bt_beam_size,
max_len_a=args.bt_max_len_a,
max_len_b=args.bt_max_len_b,
)
decoder_lang_tok_idx = self.get_decoder_langtok(src)
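                # Editorial note (added for clarity): the default arguments below
                # bind this iteration's model, bos token and generator at function
                # definition time, so each language pair keeps its own closure
                # instead of all pairs sharing the last loop values.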
def backtranslate_fn(
sample,
model=model.models[key],
bos_token=decoder_lang_tok_idx,
sequence_generator=self.sequence_generators[key],
):
return sequence_generator.generate(
[model],
sample,
bos_token=bos_token,
)
self.backtranslators[lang_pair] = backtranslate_fn
return model
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
if update_num > 0:
self.update_step(update_num)
        # defaultdict avoids a KeyError when the per-key sums below are first updated
        agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
def forward_backward(model, samples, logging_output_key, weight):
nonlocal agg_loss, agg_sample_size, agg_logging_output
if samples is None or len(samples) == 0:
return
loss, sample_size, logging_output = criterion(model, samples)
if ignore_grad:
loss *= 0
else:
loss *= weight
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[logging_output_key] += logging_output[k]
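        # Editorial note (added for clarity): the three blocks below accumulate
        # one weighted backward pass per objective -- supervised parallel data,
        # on-the-fly back-translation and denoising autoencoding -- so a single
        # optimizer step mixes the objectives according to the current lambdas.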
if self.lambda_parallel > 0.0:
for lang_pair in self.lang_pairs:
forward_backward(
model.models[lang_pair],
sample[lang_pair],
lang_pair,
self.lambda_parallel,
)
if self.lambda_otf_bt > 0.0:
for lang_pair in self.lang_pairs:
sample_key = _get_bt_dataset_key(lang_pair)
forward_backward(
model.models[lang_pair],
sample[sample_key],
sample_key,
self.lambda_otf_bt,
)
if self.lambda_denoising > 0.0:
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
sample_key = _get_denoising_dataset_key(lang_pair)
forward_backward(
model.models["{0}-{0}".format(tgt)],
sample[sample_key],
sample_key,
self.lambda_denoising,
)
return agg_loss, agg_sample_size, agg_logging_output
def update_step(self, num_updates):
def lambda_step_func(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [
i
for i in range(len(config) - 1)
if config[i][0] <= n_iter < config[i + 1][0]
]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
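        # Worked example (editorial addition, not in the original source): with
        # config = [(1000, 0.0), (2000, 1.0)] and n_iter = 1500 the bracketing
        # segment is (1000, 0.0) -> (2000, 1.0), giving
        # 0.0 + (1500 - 1000) * (1.0 - 0.0) / (2000 - 1000) = 0.5; past the last
        # breakpoint the final weight (here 1.0) is held constant.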
if self.lambda_parallel_steps is not None:
self.lambda_parallel = lambda_step_func(
self.lambda_parallel_steps, num_updates
)
if self.lambda_denoising_steps is not None:
self.lambda_denoising = lambda_step_func(
self.lambda_denoising_steps, num_updates
)
if self.lambda_otf_bt_steps is not None:
self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)
|
COCO-LM/fairseq/fairseq/tasks/semisupervised_translation.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/semisupervised_translation.py",
"repo_id": "COCO-LM",
"token_count": 11142
}
| 193 |