text (stringlengths 5–22M) | id (stringlengths 12–177) | metadata (dict) | __index_level_0__ (int64 0–1.37k)
---|---|---|---|
{
"modelname": "TemplatePT",
"uppercase_modelname": "TEMPLATE_PT",
"lowercase_modelname": "template_pt",
"camelcase_modelname": "TemplatePt",
"authors": "The HuggingFace Team",
"checkpoint_identifier": "brand-new-bert-base-cased",
"tokenizer_type": "Based on BERT",
"generate_tensorflow_and_pytorch": "PyTorch",
"is_encoder_decoder_model": "False"
}
|
AdaMix/templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json/0
|
{
"file_path": "AdaMix/templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json",
"repo_id": "AdaMix",
"token_count": 145
}
| 67 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
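# The tests below run PyTorchBenchmark end to end on tiny hub checkpoints with
# batch_sizes=[1], sequence_lengths=[8] and multi_process=False, and only assert
# that the returned time/memory result dicts are populated.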
@require_torch
class BenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_torchscript(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
torchscript=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_inference_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
fp16=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_model_no_architectures(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
# set architectures equal to `None`
config.architectures = None
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_train_no_configs_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
fp16=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
|
AdaMix/tests/test_benchmark.py/0
|
{
"file_path": "AdaMix/tests/test_benchmark.py",
"repo_id": "AdaMix",
"token_count": 4950
}
| 68 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import time
import unittest
from requests.exceptions import HTTPError
from transformers.hf_api import HfApi, HfFolder, ModelInfo, RepoObj
from transformers.testing_utils import require_git_lfs
USER = "__DUMMY_TRANSFORMERS_USER__"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
ENDPOINT_STAGING_BASIC_AUTH = f"https://{USER}:{PASS}@moon-staging.huggingface.co"
REPO_NAME = "my-model-{}".format(int(time.time()))
REPO_NAME_LARGE_FILE = "my-model-largefiles-{}".format(int(time.time()))
WORKING_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/working_repo")
LARGE_FILE_14MB = "https://cdn-media.huggingface.co/lfs-largefiles/progit.epub"
LARGE_FILE_18MB = "https://cdn-media.huggingface.co/lfs-largefiles/progit.pdf"
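# All API tests below talk to the staging endpoint using the dummy credentials above,
# so repositories can be created and deleted freely during the test run.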
class HfApiCommonTest(unittest.TestCase):
_api = HfApi(endpoint=ENDPOINT_STAGING)
class HfApiLoginTest(HfApiCommonTest):
def test_login_invalid(self):
with self.assertRaises(HTTPError):
self._api.login(username=USER, password="fake")
def test_login_valid(self):
token = self._api.login(username=USER, password=PASS)
self.assertIsInstance(token, str)
class HfApiEndpointsTest(HfApiCommonTest):
@classmethod
def setUpClass(cls):
"""
Share this valid token in all tests below.
"""
cls._token = cls._api.login(username=USER, password=PASS)
def test_whoami(self):
user, orgs = self._api.whoami(token=self._token)
self.assertEqual(user, USER)
self.assertIsInstance(orgs, list)
def test_list_repos_objs(self):
objs = self._api.list_repos_objs(token=self._token)
self.assertIsInstance(objs, list)
if len(objs) > 0:
o = objs[-1]
self.assertIsInstance(o, RepoObj)
def test_create_and_delete_repo(self):
self._api.create_repo(token=self._token, name=REPO_NAME)
self._api.delete_repo(token=self._token, name=REPO_NAME)
class HfApiPublicTest(unittest.TestCase):
def test_staging_model_list(self):
_api = HfApi(endpoint=ENDPOINT_STAGING)
_ = _api.model_list()
def test_model_list(self):
_api = HfApi()
models = _api.model_list()
self.assertGreater(len(models), 100)
self.assertIsInstance(models[0], ModelInfo)
class HfFolderTest(unittest.TestCase):
def test_token_workflow(self):
"""
Test the whole token save/get/delete workflow,
with the desired behavior with respect to non-existent tokens.
"""
token = "token-{}".format(int(time.time()))
HfFolder.save_token(token)
self.assertEqual(HfFolder.get_token(), token)
HfFolder.delete_token()
HfFolder.delete_token()
# ^^ not an error, we test that the
# second call does not fail.
self.assertEqual(HfFolder.get_token(), None)
@require_git_lfs
class HfLargefilesTest(HfApiCommonTest):
@classmethod
def setUpClass(cls):
"""
Share this valid token in all tests below.
"""
cls._token = cls._api.login(username=USER, password=PASS)
def setUp(self):
try:
shutil.rmtree(WORKING_REPO_DIR)
except FileNotFoundError:
pass
def tearDown(self):
self._api.delete_repo(token=self._token, name=REPO_NAME_LARGE_FILE)
def setup_local_clone(self, REMOTE_URL):
REMOTE_URL_AUTH = REMOTE_URL.replace(ENDPOINT_STAGING, ENDPOINT_STAGING_BASIC_AUTH)
subprocess.run(["git", "clone", REMOTE_URL_AUTH, WORKING_REPO_DIR], check=True, capture_output=True)
subprocess.run(["git", "lfs", "track", "*.pdf"], check=True, cwd=WORKING_REPO_DIR)
subprocess.run(["git", "lfs", "track", "*.epub"], check=True, cwd=WORKING_REPO_DIR)
def test_end_to_end_thresh_6M(self):
REMOTE_URL = self._api.create_repo(
token=self._token, name=REPO_NAME_LARGE_FILE, lfsmultipartthresh=6 * 10 ** 6
)
self.setup_local_clone(REMOTE_URL)
subprocess.run(["wget", LARGE_FILE_18MB], check=True, capture_output=True, cwd=WORKING_REPO_DIR)
subprocess.run(["git", "add", "*"], check=True, cwd=WORKING_REPO_DIR)
subprocess.run(["git", "commit", "-m", "commit message"], check=True, cwd=WORKING_REPO_DIR)
# This will fail as we haven't set up our custom transfer agent yet.
failed_process = subprocess.run(["git", "push"], capture_output=True, cwd=WORKING_REPO_DIR)
self.assertEqual(failed_process.returncode, 1)
self.assertIn("transformers-cli lfs-enable-largefiles", failed_process.stderr.decode())
# ^ Instructions on how to fix this are included in the error message.
subprocess.run(["transformers-cli", "lfs-enable-largefiles", WORKING_REPO_DIR], check=True)
start_time = time.time()
subprocess.run(["git", "push"], check=True, cwd=WORKING_REPO_DIR)
print("took", time.time() - start_time)
# To be 100% sure, let's download the resolved file
pdf_url = f"{REMOTE_URL}/resolve/main/progit.pdf"
DEST_FILENAME = "uploaded.pdf"
subprocess.run(["wget", pdf_url, "-O", DEST_FILENAME], check=True, capture_output=True, cwd=WORKING_REPO_DIR)
dest_filesize = os.stat(os.path.join(WORKING_REPO_DIR, DEST_FILENAME)).st_size
self.assertEqual(dest_filesize, 18685041)
def test_end_to_end_thresh_16M(self):
# Here we'll push one multipart and one non-multipart file in the same commit, and see what happens
REMOTE_URL = self._api.create_repo(
token=self._token, name=REPO_NAME_LARGE_FILE, lfsmultipartthresh=16 * 10 ** 6
)
self.setup_local_clone(REMOTE_URL)
subprocess.run(["wget", LARGE_FILE_18MB], check=True, capture_output=True, cwd=WORKING_REPO_DIR)
subprocess.run(["wget", LARGE_FILE_14MB], check=True, capture_output=True, cwd=WORKING_REPO_DIR)
subprocess.run(["git", "add", "*"], check=True, cwd=WORKING_REPO_DIR)
subprocess.run(["git", "commit", "-m", "both files in same commit"], check=True, cwd=WORKING_REPO_DIR)
subprocess.run(["transformers-cli", "lfs-enable-largefiles", WORKING_REPO_DIR], check=True)
start_time = time.time()
subprocess.run(["git", "push"], check=True, cwd=WORKING_REPO_DIR)
print("took", time.time() - start_time)
|
AdaMix/tests/test_hf_api.py/0
|
{
"file_path": "AdaMix/tests/test_hf_api.py",
"repo_id": "AdaMix",
"token_count": 3012
}
| 69 |
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLConfig,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
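# CTRLModelTester builds a deliberately tiny CTRLConfig (hidden_size=32, 5 layers, 4 heads,
# vocab_size=99) so that the shape and loss checks below stay cheap.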
class CTRLModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
config.num_labels = self.num_labels
model = CTRLForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
test_pruning = True
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_ctrl(self):
model = CTRLLMHeadModel.from_pretrained("ctrl")
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
|
AdaMix/tests/test_modeling_ctrl.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_ctrl.py",
"repo_id": "AdaMix",
"token_count": 3832
}
| 70 |
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertConfig,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=64,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
q_groups=2,
k_groups=2,
v_groups=2,
post_attention_groups=2,
intermediate_groups=4,
output_groups=1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.q_groups = q_groups
self.k_groups = k_groups
self.v_groups = v_groups
self.post_attention_groups = post_attention_groups
self.intermediate_groups = intermediate_groups
self.output_groups = output_groups
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = SqueezeBertConfig(
embedding_size=self.hidden_size,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
attention_probs_dropout_prob=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
q_groups=self.q_groups,
k_groups=self.k_groups,
v_groups=self.v_groups,
post_attention_groups=self.post_attention_groups,
intermediate_groups=self.intermediate_groups,
output_groups=self.output_groups,
)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_squeezebert_model(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = SqueezeBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, input_mask)
result = model(input_ids)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
)
def create_and_check_squeezebert_for_masked_lm(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = SqueezeBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_squeezebert_for_question_answering(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = SqueezeBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_squeezebert_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = SqueezeBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_squeezebert_for_token_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = SqueezeBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_squeezebert_for_multiple_choice(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = SqueezeBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
test_pruning = False
test_torchscript = True
test_resize_embeddings = True
test_head_masking = False
def setUp(self):
self.model_tester = SqueezeBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_squeezebert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = SqueezeBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_classification_head(self):
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
|
AdaMix/tests/test_modeling_squeezebert.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_squeezebert.py",
"repo_id": "AdaMix",
"token_count": 5715
}
| 71 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import ElectraConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.electra.modeling_tf_electra import (
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
)
class TFElectraModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.embedding_size = 128
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ElectraConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_electra_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_electra_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_electra_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraForPreTraining(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
def create_and_check_electra_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFElectraForSequenceClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_electra_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFElectraForMultipleChoice(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_electra_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFElectraForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_electra_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFElectraForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFElectraModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFElectraModel,
TFElectraForMaskedLM,
TFElectraForPreTraining,
TFElectraForTokenClassification,
TFElectraForMultipleChoice,
TFElectraForSequenceClassification,
TFElectraForQuestionAnswering,
)
if is_tf_available()
else ()
)
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_electra_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
# for model_name in TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/electra-small-discriminator"]:
model = TFElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFElectraModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFElectraForPreTraining.from_pretrained("lysandre/tiny-electra-random")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = [1, 6]
self.assertEqual(output.shape, expected_shape)
print(output[:, :3])
expected_slice = tf.constant([[-0.24651965, 0.8835437, 1.823782]])
tf.debugging.assert_near(output[:, :3], expected_slice, atol=1e-4)
|
AdaMix/tests/test_modeling_tf_electra.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_tf_electra.py",
"repo_id": "AdaMix",
"token_count": 4804
}
| 72 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import RobertaConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.roberta.modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
class TFRobertaModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = RobertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_roberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_roberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaForMaskedLM(config=config)
result = model([input_ids, input_mask, token_type_ids])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_roberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFRobertaForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_roberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFRobertaForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_roberta_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFRobertaForMultipleChoice(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRobertaModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFRobertaModel,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaForQuestionAnswering,
)
if is_tf_available()
else ()
)
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFRobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_roberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFRobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFRobertaModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFRobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = [1, 11, 50265]
self.assertEqual(list(output.numpy().shape), expected_shape)
# compare the actual values for a slice.
expected_slice = tf.constant(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
@slow
def test_inference_no_head(self):
model = TFRobertaModel.from_pretrained("roberta-base")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = tf.constant(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
)
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
@slow
def test_inference_classification_head(self):
model = TFRobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = [1, 3]
self.assertEqual(list(output.numpy().shape), expected_shape)
expected_tensor = tf.constant([[-0.9469, 0.3913, 0.5118]])
self.assertTrue(numpy.allclose(output.numpy(), expected_tensor.numpy(), atol=1e-4))
|
AdaMix/tests/test_modeling_tf_roberta.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_tf_roberta.py",
"repo_id": "AdaMix",
"token_count": 4791
}
| 73 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from unittest import mock
from transformers import is_tf_available, is_torch_available, pipeline
from transformers.file_utils import to_py_obj
from transformers.pipelines import Pipeline
from transformers.testing_utils import _run_slow_tests, is_pipeline_test, require_tf, require_torch, slow
VALID_INPUTS = ["A simple string", ["list of strings"]]
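# CustomInputPipelineCommonMixin pre-downloads the checkpoints listed in small_models
# (plus large_models when slow tests are enabled) in setUp, runs each pipeline under both
# frameworks, and verifies that slow and fast tokenizers feed identical inputs to the model.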
@is_pipeline_test
class CustomInputPipelineCommonMixin:
pipeline_task = None
pipeline_loading_kwargs = {} # Additional kwargs to load the pipeline with
pipeline_running_kwargs = {} # Additional kwargs to run the pipeline with
small_models = [] # Models tested without the @slow decorator
large_models = [] # Models tested with the @slow decorator
valid_inputs = VALID_INPUTS # Some inputs which are valid to compare fast and slow tokenizers
def setUp(self) -> None:
if not is_tf_available() and not is_torch_available():
return # Currently no JAX pipelines
# Download needed checkpoints
models = self.small_models
if _run_slow_tests:
models = models + self.large_models
for model_name in models:
if is_torch_available():
pipeline(
self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="pt",
**self.pipeline_loading_kwargs,
)
if is_tf_available():
pipeline(
self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="tf",
**self.pipeline_loading_kwargs,
)
@require_torch
@slow
def test_pt_defaults(self):
pipeline(self.pipeline_task, framework="pt", **self.pipeline_loading_kwargs)
@require_tf
@slow
def test_tf_defaults(self):
pipeline(self.pipeline_task, framework="tf", **self.pipeline_loading_kwargs)
@require_torch
def test_torch_small(self):
for model_name in self.small_models:
nlp = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="pt",
**self.pipeline_loading_kwargs,
)
self._test_pipeline(nlp)
@require_tf
def test_tf_small(self):
for model_name in self.small_models:
nlp = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="tf",
**self.pipeline_loading_kwargs,
)
self._test_pipeline(nlp)
@require_torch
@slow
def test_torch_large(self):
for model_name in self.large_models:
nlp = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="pt",
**self.pipeline_loading_kwargs,
)
self._test_pipeline(nlp)
@require_tf
@slow
def test_tf_large(self):
for model_name in self.large_models:
nlp = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="tf",
**self.pipeline_loading_kwargs,
)
self._test_pipeline(nlp)
def _test_pipeline(self, nlp: Pipeline):
raise NotImplementedError
@require_torch
def test_compare_slow_fast_torch(self):
for model_name in self.small_models:
nlp_slow = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="pt",
use_fast=False,
**self.pipeline_loading_kwargs,
)
nlp_fast = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="pt",
use_fast=True,
**self.pipeline_loading_kwargs,
)
self._compare_slow_fast_pipelines(nlp_slow, nlp_fast, method="forward")
@require_tf
def test_compare_slow_fast_tf(self):
for model_name in self.small_models:
nlp_slow = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="tf",
use_fast=False,
**self.pipeline_loading_kwargs,
)
nlp_fast = pipeline(
task=self.pipeline_task,
model=model_name,
tokenizer=model_name,
framework="tf",
use_fast=True,
**self.pipeline_loading_kwargs,
)
self._compare_slow_fast_pipelines(nlp_slow, nlp_fast, method="call")
def _compare_slow_fast_pipelines(self, nlp_slow: Pipeline, nlp_fast: Pipeline, method: str):
"""We check that the inputs to the models forward passes are identical for
slow and fast tokenizers.
"""
with mock.patch.object(
nlp_slow.model, method, wraps=getattr(nlp_slow.model, method)
) as mock_slow, mock.patch.object(nlp_fast.model, method, wraps=getattr(nlp_fast.model, method)) as mock_fast:
for inputs in self.valid_inputs:
if isinstance(inputs, dict):
inputs.update(self.pipeline_running_kwargs)
_ = nlp_slow(**inputs)
_ = nlp_fast(**inputs)
else:
_ = nlp_slow(inputs, **self.pipeline_running_kwargs)
_ = nlp_fast(inputs, **self.pipeline_running_kwargs)
mock_slow.assert_called()
mock_fast.assert_called()
self.assertEqual(len(mock_slow.call_args_list), len(mock_fast.call_args_list))
for mock_slow_call_args, mock_fast_call_args in zip(
mock_slow.call_args_list, mock_fast.call_args_list
):
slow_call_args, slow_call_kwargs = mock_slow_call_args
fast_call_args, fast_call_kwargs = mock_fast_call_args
slow_call_args, slow_call_kwargs = to_py_obj(slow_call_args), to_py_obj(slow_call_kwargs)
fast_call_args, fast_call_kwargs = to_py_obj(fast_call_args), to_py_obj(fast_call_kwargs)
self.assertEqual(slow_call_args, fast_call_args)
self.assertDictEqual(slow_call_kwargs, fast_call_kwargs)
@is_pipeline_test
class MonoInputPipelineCommonMixin(CustomInputPipelineCommonMixin):
"""A version of the CustomInputPipelineCommonMixin
with a predefined `_test_pipeline` method.
"""
mandatory_keys = {} # Keys which should be in the output
invalid_inputs = [None] # inputs which are not allowed
expected_multi_result: Optional[List] = None
expected_check_keys: Optional[List[str]] = None
def _test_pipeline(self, nlp: Pipeline):
self.assertIsNotNone(nlp)
mono_result = nlp(self.valid_inputs[0], **self.pipeline_running_kwargs)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], (dict, list))
if isinstance(mono_result[0], list):
mono_result = mono_result[0]
for key in self.mandatory_keys:
self.assertIn(key, mono_result[0])
multi_result = [nlp(input, **self.pipeline_running_kwargs) for input in self.valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
if self.expected_multi_result is not None:
for result, expect in zip(multi_result, self.expected_multi_result):
for key in self.expected_check_keys or []:
self.assertEqual(
set([o[key] for o in result]),
set([o[key] for o in expect]),
)
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in self.mandatory_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, self.invalid_inputs)
|
AdaMix/tests/test_pipelines_common.py/0
|
{
"file_path": "AdaMix/tests/test_pipelines_common.py",
"repo_id": "AdaMix",
"token_count": 4465
}
| 74 |
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
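# Common tests shared by sequence feature extractors: batching, padding strategies and attention masks.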
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to be overwritten by feature-extractor-specific tests
feat_extract_tester = None
feature_extraction_class = None
@property
def feat_extract_dict(self):
return self.feat_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_common_properties(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feat_extract, "feature_size"))
self.assertTrue(hasattr(feat_extract, "sampling_rate"))
self.assertTrue(hasattr(feat_extract, "padding_value"))
def test_batch_feature(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_torch
def test_batch_feature_pt(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_tf
def test_batch_feature_tf(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
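    # Shared implementation for the padding tests below: exercises padding=False, "longest", "max_length" and pad_to_multiple_of.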
def _check_padding(self, numpify=False):
def _inputs_have_equal_length(input):
length = len(input[0])
for input_slice in input[1:]:
if len(input_slice) != length:
return False
return True
def _inputs_are_equal(input_1, input_2):
if len(input_1) != len(input_2):
return False
for input_slice_1, input_slice_2 in zip(input_1, input_2):
if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
pad_diff = self.feat_extract_tester.seq_length_diff
pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
pad_min_length = self.feat_extract_tester.min_seq_length
batch_size = self.feat_extract_tester.batch_size
feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
input_1 = feat_extract.pad(processed_features, padding=False)[input_name]
input_2 = feat_extract.pad(processed_features, padding="longest")[input_name]
input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))[
input_name
]
input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="max_length")[input_name]
input_5 = feat_extract.pad(
processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
)[input_name]
self.assertFalse(_inputs_have_equal_length(input_1))
self.assertTrue(_inputs_have_equal_length(input_2))
self.assertTrue(_inputs_have_equal_length(input_3))
self.assertTrue(_inputs_are_equal(input_2, input_3))
self.assertTrue(len(input_1[0]) == pad_min_length)
self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)[input_name]
input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)[input_name]
input_8 = feat_extract.pad(
processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
)[input_name]
input_9 = feat_extract.pad(
processed_features,
padding="max_length",
pad_to_multiple_of=10,
max_length=pad_max_length,
return_tensors="np",
)[input_name]
self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
self.assertTrue(_inputs_are_equal(input_6, input_7))
expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_9.shape[2] == feature_size)
# Check padding value is correct
padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
)
self.assertTrue(
abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3
)
def test_padding_from_list(self):
self._check_padding(numpify=False)
def test_padding_from_array(self):
self._check_padding(numpify=True)
@require_torch
def test_padding_accepts_tensors_pt(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().sum()) < 1e-2)
@require_tf
def test_padding_accepts_tensors_tf(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().sum()) < 1e-2)
def test_attention_mask(self):
feat_dict = self.feat_extract_dict
feat_dict["return_attention_mask"] = True
feat_extract = self.feature_extraction_class(**feat_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
input_name = feat_extract.model_input_names[0]
processed = BatchFeature({input_name: speech_inputs})
processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
self.assertIn("attention_mask", processed)
self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
|
AdaMix/tests/test_sequence_feature_extraction_common.py/0
|
{
"file_path": "AdaMix/tests/test_sequence_feature_extraction_common.py",
"repo_id": "AdaMix",
"token_count": 4815
}
| 75 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from .test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
tokenizer_class = DistilBertTokenizer
rust_tokenizer_class = DistilBertTokenizerFast
test_rust_tokenizer = True
@slow
def test_sequence_builders(self):
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
tokenizer.sep_token_id
]
|
AdaMix/tests/test_tokenization_distilbert.py/0
|
{
"file_path": "AdaMix/tests/test_tokenization_distilbert.py",
"repo_id": "AdaMix",
"token_count": 572
}
| 76 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from .test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = ProphetNetTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
def test_prepare_batch(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
self.assertIsInstance(batch, BatchEncoding)
result = list(batch.input_ids.numpy()[0])
self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9), batch.input_ids.shape)
self.assertEqual((2, 9), batch.attention_mask.shape)
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_2 + [102]
|
AdaMix/tests/test_tokenization_prophetnet.py/0
|
{
"file_path": "AdaMix/tests/test_tokenization_prophetnet.py",
"repo_id": "AdaMix",
"token_count": 3447
}
| 77 |
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import gc
import os
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, IntervalStrategy, PretrainedConfig, TrainingArguments, is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import (
get_tests_dir,
require_datasets,
require_optuna,
require_ray,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.utils.hp_naming import TrialShortNamer
if is_torch_available():
import torch
from torch.utils.data import IterableDataset
from transformers import (
AutoModelForMaskedLM,
AutoModelForSequenceClassification,
DataCollatorForLanguageModeling,
EarlyStoppingCallback,
GlueDataset,
GlueDataTrainingArguments,
GPT2Config,
GPT2LMHeadModel,
LineByLineTextDataset,
PreTrainedModel,
TextDataset,
Trainer,
TrainerState,
)
from transformers.modeling_utils import unwrap_model
PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"
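# Synthetic regression dataset: each target follows y = a * x + b plus Gaussian noise.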
class RegressionDataset:
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
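# TrainingArguments extended with the regression coefficients a and b used throughout these tests.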
@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
a: float = 0.0
b: float = 0.0
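# Dataset that returns the same fixed tensor as both input_ids and labels for every sample.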
class RepeatDataset:
def __init__(self, x, length=64):
self.x = x
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.x, "labels": self.x}
class DynamicShapesDataset:
def __init__(self, length=64, seed=42, batch_size=8):
self.length = length
np.random.seed(seed)
sizes = np.random.randint(1, 20, (length // batch_size,))
# For easy batching, we make every batch_size consecutive samples the same size.
self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_x": self.xs[i], "labels": self.ys[i]}
class AlmostAccuracy:
def __init__(self, thresh=0.25):
self.thresh = thresh
def __call__(self, eval_pred):
predictions, labels = eval_pred
true = np.abs(predictions - labels) <= self.thresh
return {"accuracy": true.astype(np.float32).mean().item()}
class RegressionModelConfig(PretrainedConfig):
def __init__(self, a=0, b=0, double_output=False, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
self.double_output = double_output
if is_torch_available():
class SampleIterableDataset(IterableDataset):
"""
        The criterion is not whether the dataset is an IterableDataset, but whether __len__ is implemented.
"""
def __init__(self, file_path, tokenizer):
self.ds = TextDataset(file_path=file_path, tokenizer=tokenizer, block_size=64)
def __iter__(self):
for i in range(len(self.ds)):
yield self.ds[i]
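    # Minimal linear model y = input_x * a + b; returns an MSE loss as the first output when labels are given.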
class RegressionModel(torch.nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a).float())
self.b = torch.nn.Parameter(torch.tensor(b).float())
self.double_output = double_output
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = torch.nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
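    # Same linear model but returning a dict, mimicking models wrapped by Apex O2 (see test_trainer_works_with_dict).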
class RegressionDictModel(torch.nn.Module):
def __init__(self, a=0, b=0):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a).float())
self.b = torch.nn.Parameter(torch.tensor(b).float())
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
result = {"output": y}
if labels is not None:
result["loss"] = torch.nn.functional.mse_loss(y, labels)
return result
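    # PreTrainedModel version of the regression model so checkpoint saving/loading and from_pretrained can be tested.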
class RegressionPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = torch.nn.Parameter(torch.tensor(config.a).float())
self.b = torch.nn.Parameter(torch.tensor(config.b).float())
self.double_output = config.double_output
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = torch.nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
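    # Small layer mixing Linear, LayerNorm and bias parameters, used to check weight-decay parameter grouping.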
class TstLayer(torch.nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = torch.nn.Linear(hidden_size, hidden_size)
self.ln1 = torch.nn.LayerNorm(hidden_size)
self.linear2 = torch.nn.Linear(hidden_size, hidden_size)
self.ln2 = torch.nn.LayerNorm(hidden_size)
self.bias = torch.nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(torch.nn.functional.relu(self.linear1(x)))
h = torch.nn.functional.relu(self.linear2(x))
return self.ln2(x + h + self.bias)
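# Builds a Trainer on the synthetic regression task; remaining kwargs are forwarded to RegressionTrainingArguments.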
def get_regression_trainer(a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, **kwargs):
label_names = kwargs.get("label_names", None)
train_dataset = RegressionDataset(length=train_len, label_names=label_names)
eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)
if pretrained:
config = RegressionModelConfig(a=a, b=b, double_output=double_output)
model = RegressionPreTrainedModel(config)
else:
model = RegressionModel(a=a, b=b, double_output=double_output)
compute_metrics = kwargs.pop("compute_metrics", None)
data_collator = kwargs.pop("data_collator", None)
optimizers = kwargs.pop("optimizers", (None, None))
output_dir = kwargs.pop("output_dir", "./regression")
model_init = kwargs.pop("model_init", None)
args = RegressionTrainingArguments(output_dir, a=a, b=b, **kwargs)
return Trainer(
model,
args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
optimizers=optimizers,
model_init=model_init,
)
@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.default_trained_model = (trainer.model.a, trainer.model.b)
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.alternate_trained_model = (trainer.model.a, trainer.model.b)
def check_trained_model(self, model, alternate_seed=False):
# Checks a training seeded with learning_rate = 0.1
(a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
self.assertTrue(torch.allclose(model.a, a))
self.assertTrue(torch.allclose(model.b, b))
def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True):
file_list = [WEIGHTS_NAME, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
if is_pretrained:
file_list.append("config.json")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(
self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True
):
checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history
values = [d[metric] for d in log_history]
best_value = max(values) if greater_is_better else min(values)
best_checkpoint = (values.index(best_value) + 1) * freq
checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
self.assertTrue(torch.allclose(best_model.b, trainer.model.b))
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
# We'll pop things so operate on copies.
state = trainer_state.copy()
state1 = trainer_state1.copy()
        # Log history may contain different logs for the time metrics (after resuming a training).
log_history = state.pop("log_history", None)
log_history1 = state1.pop("log_history", None)
self.assertEqual(state, state1)
for log, log1 in zip(log_history, log_history1):
_ = log.pop("train_runtime", None)
_ = log1.pop("train_runtime", None)
_ = log.pop("train_samples_per_second", None)
_ = log1.pop("train_samples_per_second", None)
self.assertEqual(log, log1)
def test_trainer_works_with_dict(self):
# Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break
# anything.
train_dataset = RegressionDataset()
eval_dataset = RegressionDataset()
model = RegressionDictModel()
args = TrainingArguments("./regression")
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
_ = trainer.evaluate()
_ = trainer.predict(eval_dataset)
def test_evaluation_with_keys_to_drop(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_ctx=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
eval_dataset = RepeatDataset(x)
args = TrainingArguments("./test")
trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)
# By default the past_key_values are removed
result = trainer.predict(eval_dataset)
self.assertTrue(isinstance(result.predictions, np.ndarray))
# We can still get them by setting ignore_keys to []
result = trainer.predict(eval_dataset, ignore_keys=[])
self.assertTrue(isinstance(result.predictions, tuple))
self.assertEqual(len(result.predictions), 2)
def test_training_arguments_are_left_untouched(self):
trainer = get_regression_trainer()
trainer.train()
args = TrainingArguments("./regression")
dict1, dict2 = args.to_dict(), trainer.args.to_dict()
for key in dict1.keys():
# Logging dir can be slightly different as they default to something with the time.
if key != "logging_dir":
self.assertEqual(dict1[key], dict2[key])
def test_reproducible_training(self):
        # Checks that training ran, the model was updated, and the seed made training reproducible.
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.check_trained_model(trainer.model)
# Checks that a different seed gets different (reproducible) results.
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_number_of_steps_in_training(self):
# Regular training has n_epochs * len(train_dl) steps
trainer = get_regression_trainer(learning_rate=0.1)
train_output = trainer.train()
self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size)
# Check passing num_train_epochs works (and a float version too):
trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size))
# If we pass a max_steps, num_train_epochs is ignored
trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 10)
def test_train_and_eval_dataloaders(self):
n_gpu = max(1, torch.cuda.device_count())
trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)
self.assertEqual(trainer.get_train_dataloader().batch_size, 16 * n_gpu)
trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)
self.assertEqual(trainer.get_eval_dataloader().batch_size, 16 * n_gpu)
# Check drop_last works
trainer = get_regression_trainer(
train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)
trainer = get_regression_trainer(
train_len=66,
eval_len=74,
learning_rate=0.1,
per_device_train_batch_size=16,
per_device_eval_batch_size=32,
dataloader_drop_last=True,
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))
# Check passing a new dataset for evaluation works
new_eval_dataset = RegressionDataset(length=128)
self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))
@require_torch_multi_gpu
def test_data_is_not_parallelized_when_model_is_parallel(self):
model = RegressionModel()
# Make the Trainer believe it's a parallelized model
model.is_parallelizable = True
model.model_parallel = True
args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16)
trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset())
# Check the Trainer was fooled
self.assertTrue(trainer.is_model_parallel)
self.assertEqual(trainer.args.n_gpu, 1)
# The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu
self.assertEqual(trainer.get_train_dataloader().batch_size, 16)
self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16)
self.assertEqual(trainer.get_eval_dataloader().batch_size, 16)
self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16)
def test_evaluate(self):
trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict(self):
trainer = get_regression_trainer(a=1.5, b=2.5)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"])
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
def test_dynamic_shapes(self):
eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
model = RegressionModel(a=2, b=1)
args = TrainingArguments("./regression")
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
# Same tests with eval accumulation
args = TrainingArguments("./regression", eval_accumulation_steps=2)
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
@require_datasets
def test_trainer_with_datasets(self):
import datasets
np.random.seed(42)
x = np.random.normal(size=(64,)).astype(np.float32)
y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})
# Base training. Should have the same results as test_reproducible_training
model = RegressionModel()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Can return tensors.
train_dataset.set_format(type="torch", dtype=torch.float32)
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Adding one column not used by the model should have no impact
z = np.random.normal(size=(64,)).astype(np.float32)
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z})
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
def test_custom_optimizer(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression")
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0)
def test_model_init(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())
trainer.train()
self.check_trained_model(trainer.model)
        # Re-training should restart from scratch and thus lead to the same results.
trainer.train()
self.check_trained_model(trainer.model)
        # Re-training should restart from scratch and thus lead to the same results, and the new seed should be used.
trainer.args.seed = 314
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_save_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)
def test_gradient_accumulation(self):
        # Training with half the batch size but gradient accumulation steps of 2 should give the same results.
trainer = get_regression_trainer(
gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1
)
trainer.train()
self.check_trained_model(trainer.model)
@require_torch_multi_gpu
def test_run_seq2seq_double_train_wrap_once(self):
# test that we don't wrap the model more than once
# since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for
# example DataParallel(DataParallel(model))
trainer = get_regression_trainer()
trainer.train()
model_wrapped_before = trainer.model_wrapped
trainer.train()
model_wrapped_after = trainer.model_wrapped
self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice")
def test_can_resume_training(self):
if torch.cuda.device_count() > 2:
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
return
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer and load model
trainer = get_regression_trainer(
output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(
output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_resume_training_with_gradient_accumulation(self):
if torch.cuda.device_count() > 2:
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
return
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_load_best_model_at_end(self):
total = int(self.n_epochs * 64 / self.batch_size)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
load_best_model_at_end=True,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)
# Save is done every eval regardless of the strategy
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
evaluation_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
self.check_best_model_has_been_loaded(
tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
)
# Test this works with a non PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
load_best_model_at_end=True,
pretrained=False,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)
@slow
def test_trainer_eval_mrpc(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
)
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
result = trainer.evaluate()
self.assertLess(result["eval_loss"], 0.2)
@slow
def test_trainer_eval_lm(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
dataset = LineByLineTextDataset(
tokenizer=tokenizer,
file_path=PATH_SAMPLE_TEXT,
block_size=tokenizer.max_len_single_sentence,
)
self.assertEqual(len(dataset), 31)
def test_trainer_iterable_dataset(self):
# Simulate Language Modeling with an IterableDataset, with no __len__ method
# Pick-up a tiny model, so it works on CPU
# See Issue #5990: https://github.com/huggingface/transformers/issues/5990
MODEL_ID = "sshleifer/tiny-distilbert-base-cased"
model = AutoModelForMaskedLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
train_dataset = SampleIterableDataset(file_path=PATH_SAMPLE_TEXT, tokenizer=tokenizer)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
training_args = TrainingArguments(output_dir="./examples", no_cuda=True, max_steps=2)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=data_collator)
trainer.train()
loader = trainer.get_train_dataloader()
self.assertIsInstance(loader, torch.utils.data.DataLoader)
self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)
# Exception if giving iterable dataset and no max_steps
with self.assertRaises(ValueError):
training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
_ = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=data_collator)
# Exception if eval_dataset is iterable in __init__
with self.assertRaises(ValueError):
training_args = TrainingArguments(output_dir="./examples", no_cuda=True, max_steps=2)
_ = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=train_dataset,
data_collator=data_collator,
)
# Exception if predicting with iterable dataset
with self.assertRaises(ValueError):
training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
trainer = Trainer(model=model, args=training_args, data_collator=data_collator)
trainer.predict(train_dataset)
# Exception if evaluating with iterable dataset
with self.assertRaises(ValueError):
training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
trainer = Trainer(model=model, args=training_args, data_collator=data_collator)
trainer.evaluate(train_dataset)
def test_num_train_epochs_in_training(self):
# len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
# It should give 1 update step for each epoch.
trainer = get_regression_trainer(
max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 3)
        # Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
# len(train_dl) < gradient_accumulation_steps.
trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(self.n_epochs))
def test_early_stopping_callback(self):
# early stopping stops training before num_training_epochs
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
load_best_model_at_end=True,
evaluation_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1, 0.0001))
train_output = trainer.train()
self.assertLess(train_output.global_step, 20 * 64 / 16)
# Invalid inputs to trainer with early stopping callback result in assertion error
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
evaluation_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1))
self.assertEqual(trainer.state.global_step, 0)
try:
trainer.train()
except AssertionError:
self.assertEqual(trainer.state.global_step, 0)
def test_flos_extraction(self):
trainer = get_regression_trainer(learning_rate=0.1)
def assert_flos_extraction(trainer, wrapped_model_to_check):
self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check))
self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0)
# with plain model
assert_flos_extraction(trainer, trainer.model)
# with enforced DataParallel
assert_flos_extraction(trainer, torch.nn.DataParallel(trainer.model))
trainer.train()
self.assertTrue(isinstance(trainer.state.total_flos, float))
def check_mem_metrics(self, trainer, check_func):
metrics = trainer.train().metrics
check_func("init_mem_cpu_alloc_delta", metrics)
check_func("train_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("init_mem_gpu_alloc_delta", metrics)
check_func("train_mem_gpu_alloc_delta", metrics)
metrics = trainer.evaluate()
check_func("eval_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("eval_mem_gpu_alloc_delta", metrics)
metrics = trainer.predict(RegressionDataset()).metrics
check_func("test_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("test_mem_gpu_alloc_delta", metrics)
def test_mem_metrics(self):
# with mem metrics enabled
trainer = get_regression_trainer()
self.check_mem_metrics(trainer, self.assertIn)
# with mem metrics disabled
trainer = get_regression_trainer(skip_memory_metrics=True)
self.check_mem_metrics(trainer, self.assertNotIn)
@require_torch_gpu
def test_fp16_full_eval(self):
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
bs = 8
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
        # 1. fp32 baseline: fp16_full_eval disabled
trainer = get_regression_trainer(a=a, b=b, eval_len=16)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
        # 2. with fp16_full_eval enabled
trainer = get_regression_trainer(a=a, b=b, eval_len=16, fp16_full_eval=True)
metrics = trainer.evaluate()
fp16_init = metrics["init_mem_gpu_alloc_delta"]
fp16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp16_init {fp16_init}")
print(f"fp16_eval {fp16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: fp16_init == close to zero
self.assertLess(fp16_init, 5_000)
        # here the model is put on the device only during eval and cast with `half()`, i.e. about 32K
        # (again we ignore the peak margin which gets returned back)
        # perfect world: fp16_eval == 32<<10
self.assertGreater(fp16_eval, 27_000)
# 3. relative comparison fp32 vs full fp16
        # fp16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == fp16_eval
self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
def test_no_wd_param_group(self):
model = torch.nn.Sequential(TstLayer(128), torch.nn.ModuleList([TstLayer(128), TstLayer(128)]))
trainer = Trainer(model=model)
trainer.create_optimizer_and_scheduler(10)
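        # only the Linear weights should receive weight decay; biases and LayerNorm parameters go to the no-decay group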
# fmt: off
wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight']
# fmt: on
wd_params = [p for n, p in model.named_parameters() if n in wd_names]
no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names]
self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params)
self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params)
@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
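            # Optuna passes the trial into model_init below, where the hyperparameters are suggested directly, so the search space here can stay empty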
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)
@require_torch
@require_ray
class TrainerHyperParameterRayIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
from ray import tune
return {
"a": tune.randint(-4, 4),
"b": tune.randint(-4, 4),
}
def model_init(config):
model_config = RegressionModelConfig(a=config["a"], b=config["b"], double_output=False)
return RegressionPreTrainedModel(model_config)
def hp_name(params):
return MyTrialShortNamer.shortname(params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4
)
|
AdaMix/tests/test_trainer.py/0
|
{
"file_path": "AdaMix/tests/test_trainer.py",
"repo_id": "AdaMix",
"token_count": 21678
}
| 78 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base master HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(fr"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
|
AdaMix/utils/get_modified_files.py/0
|
{
"file_path": "AdaMix/utils/get_modified_files.py",
"repo_id": "AdaMix",
"token_count": 435
}
| 79 |
## AirSim Drone Racing Lab: Overview
<img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/race.gif?raw=true" width="400">
ADRL is a framework for drone racing research, built on [Microsoft AirSim](https://github.com/Microsoft/Airsim).
We used our framework to host a simulation-based drone racing competition at NeurIPS 2019, [Game of Drones](https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing).
Currently, ADRL allows you to focus on three core research directions pertinent to autonomous drone racing - perception, trajectory planning and control, and head-to-head competition with a single competitor drone.
## Race Tiers
<img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/tier_1.gif?raw=true" width="400"> <img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/tier_2.gif?raw=true" width="400">
### Tier I: Planning Only
The participant’s drone races tête-à-tête with an opponent racer.
Ground truth for state estimation and environment is provided via our APIs, in the form of the odometry (position and velocity) of the participant and the opponent drones, and the poses of all the gates.
The opponent racer follows a minimum jerk trajectory via `moveOnSpline`, and goes through randomized waypoints selected in each gate's cross section.
Hence, the opponent's trajectory varies at every run.
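Below is a minimal, hypothetical sketch of how a Tier I participant might consume these APIs. The package, class, and method names (`airsimdroneracinglab`, `MultirotorClient`, `simListSceneObjects`, `simGetObjectPose`, `moveOnSplineAsync`) follow AirSim conventions and are assumptions here, not a definitive reference - consult the ADRL API documentation for the exact calls.
```python
# Hypothetical Tier I sketch (API names assumed from AirSim conventions, not verified against ADRL).
import airsimdroneracinglab as adrl

client = adrl.MultirotorClient()
client.confirmConnection()
client.enableApiControl(vehicle_name="drone_1")
client.armDisarm(True, vehicle_name="drone_1")

# Tier I exposes ground-truth gate poses, so we can plan directly through the gate centers.
gate_names = sorted(client.simListSceneObjects("Gate.*"))
gate_centers = [client.simGetObjectPose(name).position for name in gate_names]

# Fly a minimum-jerk spline through the gates; the opponent racer uses the same primitive.
client.moveOnSplineAsync(gate_centers, vel_max=15.0, acc_max=7.5, vehicle_name="drone_1").join()
```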
### Tier II: Perception Only
In this tier, the gate poses returned by the API are corrupted with noise as shown in the GIF on the right side above, and there is no opponent drone.
The ground truth state estimate of the participant drone is still available.
The next gate is not always in view, but the noisy poses returned by our API help steer the participants roughly in the right direction, after which vision-based control becomes necessary.
### Tier III: Planning and Perception
This tier has both noisy gate poses and an opponent racer, and is essentially a combination of Tiers 1 and 2.
|
AirSim-Drone-Racing-Lab/docs/adrl_overview.md/0
|
{
"file_path": "AirSim-Drone-Racing-Lab/docs/adrl_overview.md",
"repo_id": "AirSim-Drone-Racing-Lab",
"token_count": 530
}
| 80 |
import time
from PoseSampler import *
num_samples = 100
dataset_path = '/home/rb/all_files/airsim_datasets/soccer_test'
# check if output folder exists
if not os.path.isdir(dataset_path):
os.makedirs(dataset_path)
img_dir = os.path.join(dataset_path, 'images')
os.makedirs(img_dir)
else:
print('Error: path already exists')
pose_sampler = PoseSampler(num_samples, dataset_path)
for idx in range(pose_sampler.num_samples):
pose_sampler.update()
if idx % 1000 == 0:
print('Num samples: {}'.format(idx))
# time.sleep(0.3) #comment this out once you like your ranges of values
|
AirSim-Drone-Racing-VAE-Imitation/datagen/img_generator/main.py/0
|
{
"file_path": "AirSim-Drone-Racing-VAE-Imitation/datagen/img_generator/main.py",
"repo_id": "AirSim-Drone-Racing-VAE-Imitation",
"token_count": 245
}
| 81 |
from .dataset_utils import *
from .stats_utils import *
from .geom_utils import *
from .trajectory_utils import *
|
AirSim-Drone-Racing-VAE-Imitation/racing_utils/__init__.py/0
|
{
"file_path": "AirSim-Drone-Racing-VAE-Imitation/racing_utils/__init__.py",
"repo_id": "AirSim-Drone-Racing-VAE-Imitation",
"token_count": 36
}
| 82 |
#!/bin/bash
wget -c https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/releases/download/v1.1-linux/AirSim.zip;
mkdir -p /home/$USER/Documents/AirSim;
unzip AirSim.zip;
mv AirSim AirSim_Final_Round;
wget --directory-prefix=AirSim_Final_Round/AirSimExe/Content/Paks -c https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/releases/download/v1.1-linux/Final_Tier_1_and_Tier_2.pak;
wget --directory-prefix=AirSim_Final_Round/AirSimExe/Content/Paks -c https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/releases/download/v1.1-linux/Final_Tier_3.pak;
wget --directory-prefix=/home/$USER/Documents/AirSim -c https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/releases/download/v1.1-linux/settings.json;
rm AirSim.zip;
|
AirSim-NeurIPS2019-Drone-Racing/download_final_round_binaries.sh/0
|
{
"file_path": "AirSim-NeurIPS2019-Drone-Racing/download_final_round_binaries.sh",
"repo_id": "AirSim-NeurIPS2019-Drone-Racing",
"token_count": 289
}
| 83 |
include *.md
include azure/__init__.py
include azure/monitor/__init__.py
include LICENSE
recursive-include tests *.py
recursive-include samples *.py *.md
global-exclude *.pyc
global-exclude *.pyo
global-exclude __pycache__/*
|
ApplicationInsights-Python/azure-monitor-events-extension/MANIFEST.in/0
|
{
"file_path": "ApplicationInsights-Python/azure-monitor-events-extension/MANIFEST.in",
"repo_id": "ApplicationInsights-Python",
"token_count": 74
}
| 84 |
---
repos:
- repo: https://github.com/pycqa/flake8
rev: '6.0.0'
hooks:
- id: flake8
- repo: https://github.com/hcodes/yaspeller
rev: v8.0.1
hooks:
- id: yaspeller
|
AzureTRE/.pre-commit-config.yaml/0
|
{
"file_path": "AzureTRE/.pre-commit-config.yaml",
"repo_id": "AzureTRE",
"token_count": 103
}
| 85 |
import logging
import json
import azure.functions as func
from azure.storage.blob import BlobServiceClient
from shared_code import blob_operations
def delete_blob_and_container_if_last_blob(blob_url: str):
storage_account_name, container_name, blob_name = blob_operations.get_blob_info_from_blob_url(blob_url=blob_url)
credential = blob_operations.get_credential()
blob_service_client = BlobServiceClient(
account_url=blob_operations.get_account_url(storage_account_name),
credential=credential)
container_client = blob_service_client.get_container_client(container_name)
if not blob_name:
logging.info(f'No specific blob specified, deleting the entire container: {container_name}')
container_client.delete_container()
return
# If it's the only blob in the container, we need to delete the container too
# Check how many blobs are in the container (note: this exhausts the generator)
blobs_num = sum(1 for _ in container_client.list_blobs())
logging.info(f'Found {blobs_num} blobs in the container')
# Deleting blob
logging.info(f'Deleting blob {blob_name}...')
blob_client = container_client.get_blob_client(blob_name)
blob_client.delete_blob()
if blobs_num == 1:
# Need to delete the container too
logging.info(f'There was one blob in the container. Deleting container {container_name}...')
container_client.delete_container()
def main(msg: func.ServiceBusMessage):
body = msg.get_body().decode('utf-8')
logging.info(f'Python ServiceBus queue trigger processed message: {body}')
json_body = json.loads(body)
blob_url = json_body["data"]["blob_to_delete"]
logging.info(f'Blob to delete is {blob_url}')
delete_blob_and_container_if_last_blob(blob_url)
|
AzureTRE/airlock_processor/DataDeletionTrigger/__init__.py/0
|
{
"file_path": "AzureTRE/airlock_processor/DataDeletionTrigger/__init__.py",
"repo_id": "AzureTRE",
"token_count": 651
}
| 86 |
from resources import strings
from fastapi import Request
from fastapi.responses import PlainTextResponse
from services.logging import logger
async def generic_error_handler(_: Request, exception: Exception) -> PlainTextResponse:
logger.debug("=====================================")
logger.exception(exception)
logger.debug("=====================================")
return PlainTextResponse(strings.UNABLE_TO_PROCESS_REQUEST, status_code=500)
|
AzureTRE/api_app/api/errors/generic_error.py/0
|
{
"file_path": "AzureTRE/api_app/api/errors/generic_error.py",
"repo_id": "AzureTRE",
"token_count": 121
}
| 87 |
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import parse_obj_as
from api.dependencies.workspace_service_templates import get_workspace_service_template_by_name_from_path
from api.routes.resource_helpers import get_template
from db.errors import EntityVersionExist, InvalidInput
from api.helpers import get_repository
from db.repositories.resource_templates import ResourceTemplateRepository
from models.domain.resource import ResourceType
from models.schemas.user_resource_template import UserResourceTemplateInResponse, UserResourceTemplateInCreate
from models.schemas.resource_template import ResourceTemplateInformationInList
from resources import strings
from services.authentication import get_current_admin_user, get_current_tre_user_or_tre_admin
user_resource_templates_core_router = APIRouter(dependencies=[Depends(get_current_tre_user_or_tre_admin)])
@user_resource_templates_core_router.get("/workspace-service-templates/{service_template_name}/user-resource-templates", response_model=ResourceTemplateInformationInList, name=strings.API_GET_USER_RESOURCE_TEMPLATES, dependencies=[Depends(get_current_tre_user_or_tre_admin)])
async def get_user_resource_templates_for_service_template(service_template_name: str, template_repo=Depends(get_repository(ResourceTemplateRepository))) -> ResourceTemplateInformationInList:
template_infos = await template_repo.get_templates_information(ResourceType.UserResource, parent_service_name=service_template_name)
return ResourceTemplateInformationInList(templates=template_infos)
@user_resource_templates_core_router.get("/workspace-service-templates/{service_template_name}/user-resource-templates/{user_resource_template_name}", response_model=UserResourceTemplateInResponse, response_model_exclude_none=True, name=strings.API_GET_USER_RESOURCE_TEMPLATE_BY_NAME, dependencies=[Depends(get_current_tre_user_or_tre_admin)])
async def get_user_resource_template(service_template_name: str, user_resource_template_name: str, is_update: bool = False, version: Optional[str] = None, template_repo=Depends(get_repository(ResourceTemplateRepository))) -> UserResourceTemplateInResponse:
template = await get_template(user_resource_template_name, template_repo, ResourceType.UserResource, service_template_name, is_update=is_update, version=version)
return parse_obj_as(UserResourceTemplateInResponse, template)
@user_resource_templates_core_router.post("/workspace-service-templates/{service_template_name}/user-resource-templates", status_code=status.HTTP_201_CREATED, response_model=UserResourceTemplateInResponse, response_model_exclude_none=True, name=strings.API_CREATE_USER_RESOURCE_TEMPLATES, dependencies=[Depends(get_current_admin_user)])
async def register_user_resource_template(template_input: UserResourceTemplateInCreate, template_repo=Depends(get_repository(ResourceTemplateRepository)), workspace_service_template=Depends(get_workspace_service_template_by_name_from_path)) -> UserResourceTemplateInResponse:
try:
return await template_repo.create_and_validate_template(template_input, ResourceType.UserResource, workspace_service_template.name)
except EntityVersionExist:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_TEMPLATE_VERSION_EXISTS)
except InvalidInput as e:
raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=str(e))
|
AzureTRE/api_app/api/routes/user_resource_templates.py/0
|
{
"file_path": "AzureTRE/api_app/api/routes/user_resource_templates.py",
"repo_id": "AzureTRE",
"token_count": 1036
}
| 88 |
from typing import Optional
from azure.cosmos.aio import ContainerProxy
from azure.core import MatchConditions
from pydantic import BaseModel
from api.dependencies.database import Database
from db.errors import UnableToAccessDatabase
class BaseRepository:
@classmethod
async def create(cls, container_name: Optional[str] = None):
try:
cls._container: ContainerProxy = await Database().get_container_proxy(container_name)
except Exception:
raise UnableToAccessDatabase
return cls
@property
def container(self) -> ContainerProxy:
return self._container
async def query(self, query: str, parameters: Optional[dict] = None):
items = self.container.query_items(query=query, parameters=parameters)
return [i async for i in items]
async def read_item_by_id(self, item_id: str) -> dict:
return await self.container.read_item(item=item_id, partition_key=item_id)
async def save_item(self, item: BaseModel):
await self.container.create_item(body=item.dict())
async def update_item(self, item: BaseModel):
await self.container.upsert_item(body=item.dict())
async def update_item_with_etag(self, item: BaseModel, etag: str) -> BaseModel:
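        # MatchConditions.IfNotModified makes Cosmos reject the replace (HTTP 412) when the stored etag no longer matches, giving optimistic concurrency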
await self.container.replace_item(item=item.id, body=item.dict(), etag=etag, match_condition=MatchConditions.IfNotModified)
return await self.read_item_by_id(item.id)
async def upsert_item_with_etag(self, item: BaseModel, etag: str) -> BaseModel:
return await self.container.upsert_item(body=item.dict(), etag=etag, match_condition=MatchConditions.IfNotModified)
async def update_item_dict(self, item_dict: dict):
await self.container.upsert_item(body=item_dict)
async def delete_item(self, item_id: str):
await self.container.delete_item(item=item_id, partition_key=item_id)
async def rename_field_name(self, old_field_name: str, new_field_name: str):
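        # walks every document in the container, copying the value under the new field name and dropping the old one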
for item in await self.query('SELECT * FROM c'):
if old_field_name in item:
item[new_field_name] = item[old_field_name]
del item[old_field_name]
await self.update_item_dict(item)
|
AzureTRE/api_app/db/repositories/base.py/0
|
{
"file_path": "AzureTRE/api_app/db/repositories/base.py",
"repo_id": "AzureTRE",
"token_count": 841
}
| 89 |
from pydantic import Field
from pydantic.types import UUID4
from pydantic.schema import Optional
from models.domain.azuretremodel import AzureTREModel
from typing import List
from models.domain.airlock_request import AirlockFile
class EventGridMessageData(AzureTREModel):
completed_step: str = Field(title="", description="")
new_status: Optional[str] = Field(title="", description="")
request_id: str = Field(title="", description="")
request_files: Optional[List[AirlockFile]] = Field(title="", description="")
status_message: Optional[str] = Field(title="", description="")
class StepResultStatusUpdateMessage(AzureTREModel):
"""
Model for service bus message flowing back to API to update status in DB
"""
id: UUID4 = Field(title="", description="")
subject: str = Field(title="", description="")
data: EventGridMessageData = Field(title="", description="")
eventType: str = Field(title="", description="")
eventTime: str = Field(title="", description="")
topic: str = Field(title="", description="")
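# Illustrative only: a hypothetical payload that would parse into StepResultStatusUpdateMessage
# (all values are made up for demonstration; real messages flow back from the airlock processor via Service Bus):
#
# {
#   "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
#   "subject": "main",
#   "data": {
#     "completed_step": "submitted",
#     "new_status": "in_review",
#     "request_id": "<airlock request id>",
#     "status_message": "scan completed"
#   },
#   "eventType": "airlockStepResult",
#   "eventTime": "2023-01-01T00:00:00Z",
#   "topic": "<event grid topic resource id>"
# }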
|
AzureTRE/api_app/models/domain/airlock_operations.py/0
|
{
"file_path": "AzureTRE/api_app/models/domain/airlock_operations.py",
"repo_id": "AzureTRE",
"token_count": 328
}
| 90 |
from typing import List
from pydantic import BaseModel, Field
from models.domain.resource import ResourceType
from models.domain.workspace_service import WorkspaceService
def get_sample_workspace_service(workspace_id: str, workspace_service_id: str) -> dict:
return {
"id": workspace_service_id,
"workspaceId": workspace_id,
"templateName": "guacamole",
"templateVersion": "0.1.0",
"properties": {
"display_name": "my workspace service",
"description": "some description",
},
"resourceType": ResourceType.WorkspaceService
}
class WorkspaceServiceInResponse(BaseModel):
workspaceService: WorkspaceService
class Config:
schema_extra = {
"example": {
"workspace_service": get_sample_workspace_service("933ad738-7265-4b5f-9eae-a1a62928772e", "2fdc9fba-726e-4db6-a1b8-9018a2165748")
}
}
class WorkspaceServicesInList(BaseModel):
workspaceServices: List[WorkspaceService] = Field([], title="Workspace services")
class Config:
schema_extra = {
"example": {
"workspaceServices": [
get_sample_workspace_service("933ad738-7265-4b5f-9eae-a1a62928772e", "2fdc9fba-726e-4db6-a1b8-9018a2165748"),
get_sample_workspace_service("933ad738-7265-4b5f-9eae-a1a62928772e", "abcc9fba-726e-4db6-a1b8-9018a2165748")
]
}
}
class WorkspaceServiceInCreate(BaseModel):
templateName: str = Field(title="Workspace service type", description="Bundle name")
properties: dict = Field({}, title="Workspace service parameters", description="Values for the parameters required by the workspace service resource specification")
class Config:
schema_extra = {
"example": {
"templateName": "tre-service-guacamole",
"properties": {
"display_name": "my workspace service",
"description": "some description",
}
}
}
|
AzureTRE/api_app/models/schemas/workspace_service.py/0
|
{
"file_path": "AzureTRE/api_app/models/schemas/workspace_service.py",
"repo_id": "AzureTRE",
"token_count": 945
}
| 91 |
import asyncio
import json
from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
from azure.servicebus.exceptions import OperationTimeoutError, ServiceBusConnectionError
from fastapi import HTTPException
from pydantic import ValidationError, parse_obj_as
from api.dependencies.airlock import get_airlock_request_by_id_from_path
from services.airlock import update_and_publish_event_airlock_request
from services.logging import logger, tracer
from db.repositories.workspaces import WorkspaceRepository
from models.domain.airlock_request import AirlockRequestStatus
from db.repositories.airlock_requests import AirlockRequestRepository
from models.domain.airlock_operations import StepResultStatusUpdateMessage
from core import config, credentials
from resources import strings
class AirlockStatusUpdater():
def __init__(self):
pass
async def init_repos(self):
self.airlock_request_repo = await AirlockRequestRepository.create()
self.workspace_repo = await WorkspaceRepository.create()
async def receive_messages(self):
with tracer.start_as_current_span("airlock_receive_messages"):
while True:
try:
async with credentials.get_credential_async_context() as credential:
service_bus_client = ServiceBusClient(config.SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE, credential)
receiver = service_bus_client.get_queue_receiver(queue_name=config.SERVICE_BUS_STEP_RESULT_QUEUE)
logger.info(f"Looking for new messages on {config.SERVICE_BUS_STEP_RESULT_QUEUE} queue...")
async with receiver:
received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=1)
for msg in received_msgs:
async with AutoLockRenewer() as renewer:
renewer.register(receiver, msg, max_lock_renewal_duration=60)
complete_message = await self.process_message(msg)
if complete_message:
await receiver.complete_message(msg)
else:
# could have been any kind of transient issue, we'll abandon back to the queue, and retry
await receiver.abandon_message(msg)
await asyncio.sleep(10)
except OperationTimeoutError:
# Timeout occurred whilst connecting to a session - this is expected and indicates no non-empty sessions are available
logger.debug("No sessions for this process. Will look again...")
except ServiceBusConnectionError:
# Occasionally there will be a transient / network-level error in connecting to SB.
logger.info("Unknown Service Bus connection error. Will retry...")
except Exception as e:
# Catch all other exceptions, log them via .exception to get the stack trace, and reconnect
logger.exception(f"Unknown exception. Will retry - {e}")
async def process_message(self, msg):
with tracer.start_as_current_span("process_message") as current_span:
complete_message = False
try:
message = parse_obj_as(StepResultStatusUpdateMessage, json.loads(str(msg)))
current_span.set_attribute("step_id", message.id)
current_span.set_attribute("event_type", message.eventType)
current_span.set_attribute("topic", message.topic)
logger.info(f"Received step_result status update message with correlation ID {message.id}: {message}")
complete_message = await self.update_status_in_database(message)
logger.info(f"Update status in DB for {message.id}")
except (json.JSONDecodeError, ValidationError):
logger.exception(f"{strings.STEP_RESULT_MESSAGE_FORMAT_INCORRECT}: {msg.correlation_id}")
complete_message = True
except Exception:
logger.exception(f"Exception processing message: {msg.correlation_id}")
return complete_message
async def update_status_in_database(self, step_result_message: StepResultStatusUpdateMessage):
"""
        Updates an airlock request with the new status from the step_result message contents.
"""
result = False
try:
step_result_data = step_result_message.data
airlock_request_id = step_result_data.request_id
current_status = step_result_data.completed_step
new_status = AirlockRequestStatus(step_result_data.new_status) if step_result_data.new_status else None
status_message = step_result_data.status_message
request_files = step_result_data.request_files
# Find the airlock request by id
airlock_request = await get_airlock_request_by_id_from_path(airlock_request_id=airlock_request_id, airlock_request_repo=self.airlock_request_repo)
# Validate that the airlock request status is the same as current status
if airlock_request.status == current_status:
workspace = await self.workspace_repo.get_workspace_by_id(airlock_request.workspaceId)
# update to new status and send to event grid
await update_and_publish_event_airlock_request(airlock_request=airlock_request, airlock_request_repo=self.airlock_request_repo, updated_by=airlock_request.updatedBy, workspace=workspace, new_status=new_status, request_files=request_files, status_message=status_message)
result = True
else:
logger.error(strings.STEP_RESULT_MESSAGE_STATUS_DOES_NOT_MATCH.format(airlock_request_id, current_status, airlock_request.status))
except HTTPException as e:
if e.status_code == 404:
# Marking as true as this message will never succeed anyways and should be removed from the queue.
result = True
logger.exception(strings.STEP_RESULT_ID_NOT_FOUND.format(airlock_request_id))
if e.status_code == 400:
result = True
logger.exception(strings.STEP_RESULT_MESSAGE_INVALID_STATUS.format(airlock_request_id, current_status, new_status))
if e.status_code == 503:
logger.exception(strings.STATE_STORE_ENDPOINT_NOT_RESPONDING)
except Exception:
logger.exception("Failed updating request status")
return result
|
AzureTRE/api_app/service_bus/airlock_request_status_update.py/0
|
{
"file_path": "AzureTRE/api_app/service_bus/airlock_request_status_update.py",
"repo_id": "AzureTRE",
"token_count": 2901
}
| 92 |
import json
import pytest
from mock import patch
from pydantic import parse_obj_as
from starlette import status
from db.errors import EntityDoesNotExist, EntityVersionExist, InvalidInput, UnableToAccessDatabase
from services.authentication import get_current_admin_user, get_current_tre_user_or_tre_admin
from models.domain.resource import ResourceType
from models.domain.resource_template import ResourceTemplate
from models.schemas.resource_template import ResourceTemplateInformation
from models.schemas.shared_service_template import SharedServiceTemplateInResponse
from resources import strings
from services.schema_service import enrich_shared_service_template
pytestmark = pytest.mark.asyncio
@pytest.fixture
def shared_service_template():
def create_shared_service_template(template_name: str = "base-shared-service-template"):
return ResourceTemplate(
id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
name=template_name,
description="base shared service",
version="0.1.0",
resourceType=ResourceType.SharedService,
current=True,
type="object",
required=[],
properties={},
actions=[]
)
return create_shared_service_template
class TestSharedServiceTemplates:
@pytest.fixture(autouse=True, scope='class')
def _prepare(self, app, admin_user):
app.dependency_overrides[get_current_tre_user_or_tre_admin] = admin_user
app.dependency_overrides[get_current_admin_user] = admin_user
yield
app.dependency_overrides = {}
# GET /shared-service-templates/
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.get_templates_information")
async def test_get_shared_service_templates_returns_template_names_and_description(self, get_templates_info_mock, app, client):
expected_template_infos = [
ResourceTemplateInformation(name="template1", title="template 1", description="description1"),
ResourceTemplateInformation(name="template2", title="template 2", description="description2")
]
get_templates_info_mock.return_value = expected_template_infos
response = await client.get(app.url_path_for(strings.API_GET_SHARED_SERVICE_TEMPLATES))
assert response.status_code == status.HTTP_200_OK
actual_template_infos = response.json()["templates"]
assert len(actual_template_infos) == len(expected_template_infos)
for template_info in expected_template_infos:
assert template_info in actual_template_infos
# GET /shared-service-templates/{service_template_name}
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.get_current_template")
async def test_get_shared_service_template_by_name_returns_enriched_template(self, get_current_template_mock, app, client, shared_service_template):
template_name = "template1"
get_current_template_mock.return_value = shared_service_template(template_name)
response = await client.get(app.url_path_for(strings.API_GET_SHARED_SERVICE_TEMPLATE_BY_NAME, shared_service_template_name=template_name))
assert response.status_code == status.HTTP_200_OK
assert response.json()["name"] == template_name
assert "description" in response.json()["required"]
# GET /shared-service-templates/{service_template_name}
@pytest.mark.parametrize("exception, expected_status", [
(EntityDoesNotExist, status.HTTP_404_NOT_FOUND),
(UnableToAccessDatabase, status.HTTP_503_SERVICE_UNAVAILABLE)
])
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.get_current_template")
async def test_get_shared_service_template_by_name_returns_not_found_if_does_not_exist(self, get_current_template_mock, app, client, exception, expected_status):
get_current_template_mock.side_effect = exception
response = await client.get(app.url_path_for(strings.API_GET_SHARED_SERVICE_TEMPLATE_BY_NAME, shared_service_template_name="non-existent"))
assert response.status_code == expected_status
# POST /shared-service-templates/
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.create_template")
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.get_current_template")
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.get_template_by_name_and_version")
async def test_when_creating_service_template_sets_additional_properties(self, get_template_by_name_and_version_mock, get_current_template_mock, create_template_mock, app, client, input_shared_service_template, basic_shared_service_template):
get_template_by_name_and_version_mock.side_effect = EntityDoesNotExist
get_current_template_mock.side_effect = EntityDoesNotExist
create_template_mock.return_value = basic_shared_service_template
response = await client.post(app.url_path_for(strings.API_CREATE_SHARED_SERVICE_TEMPLATES), json=input_shared_service_template.dict())
expected_template = parse_obj_as(SharedServiceTemplateInResponse, enrich_shared_service_template(basic_shared_service_template))
assert json.loads(response.text)["required"] == expected_template.dict(exclude_unset=True)["required"]
assert json.loads(response.text)["properties"] == expected_template.dict(exclude_unset=True)["properties"]
    # POST /shared-service-templates/
@patch("api.routes.shared_service_templates.ResourceTemplateRepository.create_and_validate_template", side_effect=EntityVersionExist)
async def test_version_exists_not_allowed(self, _, app, client, input_shared_service_template):
response = await client.post(app.url_path_for(strings.API_CREATE_SHARED_SERVICE_TEMPLATES), json=input_shared_service_template.dict())
assert response.status_code == status.HTTP_409_CONFLICT
@patch("api.routes.workspace_service_templates.ResourceTemplateRepository.create_and_validate_template", side_effect=InvalidInput)
async def test_creating_a_shared_service_template_raises_http_422_if_step_ids_are_duplicated(self, _, client, app, input_shared_service_template):
response = await client.post(app.url_path_for(strings.API_CREATE_SHARED_SERVICE_TEMPLATES), json=input_shared_service_template.dict())
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
|
AzureTRE/api_app/tests_ma/test_api/test_routes/test_shared_service_templates.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_api/test_routes/test_shared_service_templates.py",
"repo_id": "AzureTRE",
"token_count": 2297
}
| 93 |
import copy
from unittest.mock import AsyncMock
import uuid
import pytest
import pytest_asyncio
from mock import patch, MagicMock
from jsonschema.exceptions import ValidationError
from db.repositories.resources_history import ResourceHistoryRepository
from tests_ma.test_api.test_routes.test_resource_helpers import FAKE_CREATE_TIMESTAMP, FAKE_UPDATE_TIMESTAMP
from tests_ma.test_api.conftest import create_test_user
from db.errors import EntityDoesNotExist, UserNotAuthorizedToUseTemplate
from db.repositories.resources import ResourceRepository
from azure.cosmos.exceptions import CosmosResourceNotFoundError
from models.domain.resource import Resource
from models.domain.resource_template import ResourceTemplate
from models.domain.user_resource_template import UserResourceTemplate
from models.domain.workspace import ResourceType
from models.schemas.resource import ResourcePatch
from models.schemas.workspace import WorkspaceInCreate
RESOURCE_ID = str(uuid.uuid4())
@pytest_asyncio.fixture
async def resource_repo():
with patch('api.dependencies.database.Database.get_container_proxy', return_value=None):
resource_repo = await ResourceRepository().create()
yield resource_repo
@pytest_asyncio.fixture
async def resource_history_repo():
with patch('api.dependencies.database.Database.get_container_proxy', return_value=None):
resource_history_repo = await ResourceHistoryRepository().create()
yield resource_history_repo
@pytest.fixture
def workspace_input():
return WorkspaceInCreate(templateName="base-tre", properties={"display_name": "test", "description": "test", "client_id": "123"})
def sample_resource() -> Resource:
return Resource(
id=RESOURCE_ID,
isEnabled=True,
resourcePath="/resource/path",
templateName="template_name",
templateVersion="template_version",
properties={
'display_name': 'initial display name',
'description': 'initial description',
'computed_prop': 'computed_val'
},
resourceType=ResourceType.Workspace,
etag="some-etag-value",
resourceVersion=0,
updatedWhen=FAKE_CREATE_TIMESTAMP,
user=create_test_user()
)
def sample_resource_template() -> ResourceTemplate:
return ResourceTemplate(id="123",
name="tre-user-resource",
description="description",
version="0.1.0",
resourceType=ResourceType.UserResource,
current=True,
required=['os_image', 'title'],
properties={
'title': {
'type': 'string',
'title': 'Title of the resource'
},
'os_image': {
'type': 'string',
'title': 'Windows image',
'description': 'Select Windows image to use for VM',
'enum': [
'Windows 10',
'Server 2019 Data Science VM'
],
'updateable': False
},
'vm_size': {
'type': 'string',
'title': 'Windows image',
'description': 'Select Windows image to use for VM',
'enum': [
'small',
'large'
],
'updateable': True
}
},
actions=[]).dict(exclude_none=True)
def sample_nested_template() -> ResourceTemplate:
return ResourceTemplate(
id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=[],
properties={
'rules': {
'type': 'array',
'items': {
'type': 'object',
'required': [],
'properties': {
'protocol': {
'type': 'object',
'required': ['port'],
'items': {
'type': 'object',
'properties': {
'port': {
'type': 'string'
},
'method': {
'type': 'string'
}
}
}
}
}
}
}
},
customActions=[]
).dict(exclude_none=True)
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
@patch("db.repositories.resources.ResourceRepository._validate_resource_parameters", return_value=None)
async def test_validate_input_against_template_returns_template_version_if_template_is_valid(_, enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.return_value = ResourceTemplate(id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=[],
properties={},
customActions=[]).dict()
template = await resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace, [])
assert template.version == "0.1.0"
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_template_raises_value_error_if_template_does_not_exist(enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.side_effect = EntityDoesNotExist
with pytest.raises(ValueError):
await resource_repo.validate_input_against_template("template_name", workspace_input, ResourceType.Workspace, [])
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_template_raises_value_error_if_the_user_resource_template_does_not_exist_for_the_given_workspace_service(enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.side_effect = EntityDoesNotExist
with pytest.raises(ValueError):
await resource_repo.validate_input_against_template("template_name", workspace_input, ResourceType.UserResource, [], "parent_template_name")
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_template_raises_value_error_if_payload_is_invalid(enriched_template_mock, resource_repo, workspace_input):
template_dict = ResourceTemplate(
id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=["display_name"],
properties={},
customActions=[]).dict()
# the enrich template method does this
template_dict.pop("allOf")
enriched_template_mock.return_value = template_dict
# missing display name
workspace_input = WorkspaceInCreate(templateName="template1")
with pytest.raises(ValidationError):
await resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace, [])
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_template_raises_if_user_does_not_have_required_role(enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.return_value = ResourceTemplate(id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=[],
authorizedRoles=["missing_role"],
properties={},
customActions=[]).dict()
with pytest.raises(UserNotAuthorizedToUseTemplate):
_ = await resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace, ["test_role", "another_role"])
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
@patch("db.repositories.resources.ResourceRepository._validate_resource_parameters", return_value=None)
async def test_validate_input_against_template_valid_if_user_has_only_one_role(_, enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.return_value = ResourceTemplate(id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=[],
authorizedRoles=["test_role", "missing_role"],
properties={},
customActions=[]).dict()
template = await resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace, ["test_role", "another_role"])
# does not throw
assert template.version == "0.1.0"
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
@patch("db.repositories.resources.ResourceRepository._validate_resource_parameters", return_value=None)
async def test_validate_input_against_template_valid_if_required_roles_set_is_empty(_, enriched_template_mock, resource_repo, workspace_input):
enriched_template_mock.return_value = ResourceTemplate(id="123",
name="template1",
description="description",
version="0.1.0",
resourceType=ResourceType.Workspace,
current=True,
required=[],
properties={},
customActions=[]).dict()
template = await resource_repo.validate_input_against_template("template1", workspace_input, ResourceType.Workspace, ["test_user_role"])
# does not throw
assert template.version == "0.1.0"
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_nested_template_missing_nested_prop(enriched_template_mock, resource_repo):
enriched_template_mock.return_value = sample_nested_template()
# missing port
nested_input = WorkspaceInCreate(templateName="template1")
nested_input.properties['rules'] = [
{
'protocol': {
'method': 'post'
}
}
]
with pytest.raises(ValidationError):
await resource_repo.validate_input_against_template("template1", nested_input, ResourceType.Workspace)
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceRepository._get_enriched_template")
async def test_validate_input_against_nested_template_valid(enriched_template_mock, resource_repo):
enriched_template_mock.return_value = sample_nested_template()
# has required props, nested
nested_input = WorkspaceInCreate(templateName="template1")
nested_input.properties['rules'] = [
{
'protocol': {
'method': 'post',
'port': '1234'
}
}
]
resp_template = await resource_repo.validate_input_against_template("template1", nested_input, ResourceType.Workspace)
assert resp_template is not None
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceTemplateRepository.get_current_template")
async def test_get_enriched_template_returns_the_enriched_template(get_current_mock, resource_repo):
workspace_template = ResourceTemplate(id="abc", name="template1", description="", version="", resourceType=ResourceType.Workspace, current=True, required=[], properties={}, customActions=[])
get_current_mock.return_value = workspace_template
template = await resource_repo._get_enriched_template("template1", ResourceType.Workspace)
get_current_mock.assert_called_once_with('template1', ResourceType.Workspace, '')
assert "display_name" in template["properties"]
@pytest.mark.asyncio
@patch("db.repositories.resources.ResourceTemplateRepository.get_current_template")
async def test_get_enriched_template_returns_the_enriched_template_for_user_resources(get_current_mock, resource_repo):
user_resource_template = UserResourceTemplate(id="abc", name="template1", description="", version="", resourceType=ResourceType.Workspace, current=True, required=[], properties={}, customActions=[], parentWorkspaceService="parent-template1")
get_current_mock.return_value = user_resource_template
template = await resource_repo._get_enriched_template("template1", ResourceType.UserResource, "parent-template1")
get_current_mock.assert_called_once_with('template1', ResourceType.UserResource, 'parent-template1')
assert "display_name" in template["properties"]
@pytest.mark.asyncio
async def test_get_resource_dict_by_id_raises_entity_does_not_exist_if_no_resources_come_back(resource_repo):
item_id = "123"
resource_repo.read_item_by_id = AsyncMock(side_effect=CosmosResourceNotFoundError)
with pytest.raises(EntityDoesNotExist):
await resource_repo.get_resource_dict_by_id(item_id)
@pytest.mark.asyncio
@patch("db.repositories.resources_history.ResourceHistoryRepository.save_item", return_value=AsyncMock())
@patch('db.repositories.resources.ResourceRepository.validate_patch')
@patch('db.repositories.resources.ResourceRepository.get_timestamp', return_value=FAKE_UPDATE_TIMESTAMP)
async def test_patch_resource_preserves_property_history(_, __, ___, resource_repo, resource_history_repo):
"""
Tests that properties are copied into a history array and only certain values in the root are updated
"""
resource_repo.update_item_with_etag = AsyncMock(return_value=None)
resource_patch = ResourcePatch(isEnabled=True, properties={'display_name': 'updated name'})
etag = "some-etag-value"
user = create_test_user()
resource = sample_resource()
expected_resource = sample_resource()
expected_resource.properties['display_name'] = 'updated name'
expected_resource.resourceVersion = 1
expected_resource.user = user
expected_resource.updatedWhen = FAKE_UPDATE_TIMESTAMP
await resource_repo.patch_resource(resource, resource_patch, None, etag, None, resource_history_repo, user)
resource_repo.update_item_with_etag.assert_called_once_with(expected_resource, etag)
# now patch again
new_resource = copy.deepcopy(expected_resource) # new_resource is after the first patch
new_patch = ResourcePatch(isEnabled=False, properties={'display_name': 'updated name 2'})
expected_resource.resourceVersion = 2
expected_resource.properties['display_name'] = "updated name 2"
expected_resource.isEnabled = False
expected_resource.user = user
await resource_repo.patch_resource(new_resource, new_patch, None, etag, None, resource_history_repo, user)
resource_repo.update_item_with_etag.assert_called_with(expected_resource, etag)
@patch('db.repositories.resources.ResourceTemplateRepository.enrich_template')
def test_validate_patch_with_good_fields_passes(template_repo, resource_repo):
"""
    Make sure that patch is valid when only updateable fields are included
"""
template_repo.enrich_template = MagicMock(return_value=sample_resource_template())
template = sample_resource_template()
# check it's valid when updating a single updateable prop
patch = ResourcePatch(isEnabled=True, properties={'vm_size': 'large'})
resource_repo.validate_patch(patch, template_repo, template)
@patch('db.repositories.resources.ResourceTemplateRepository.enrich_template')
def test_validate_patch_with_bad_fields_fails(template_repo, resource_repo):
"""
Make sure that patch is NOT valid when non-updateable fields are included
"""
template_repo.enrich_template = MagicMock(return_value=sample_resource_template())
template = sample_resource_template()
# check it's invalid when sending an unexpected field
patch = ResourcePatch(isEnabled=True, properties={'vm_size': 'large', 'unexpected_field': 'surprise!'})
with pytest.raises(ValidationError):
resource_repo.validate_patch(patch, template_repo, template)
# check it's invalid when sending a bad value
patch = ResourcePatch(isEnabled=True, properties={'vm_size': 'huge'})
with pytest.raises(ValidationError):
resource_repo.validate_patch(patch, template_repo, template)
# check it's invalid when trying to update a non-updateable field
patch = ResourcePatch(isEnabled=True, properties={'vm_size': 'large', 'os_image': 'linux'})
with pytest.raises(ValidationError):
resource_repo.validate_patch(patch, template_repo, template)
|
AzureTRE/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_db/test_repositories/test_resource_repository.py",
"repo_id": "AzureTRE",
"token_count": 8908
}
| 94 |
import pytest
from mock import call, patch
from models.domain.authentication import User, RoleAssignment
from models.domain.workspace import Workspace, WorkspaceRole
from services.aad_authentication import AzureADAuthorization
from services.access_service import AuthConfigValidationError
MOCK_MICROSOFT_GRAPH_URL = "https://graph.microsoft.com"
class PrincipalRole:
def __init__(self, principal_id, role_id, principal_type):
self.principal_id = principal_id
self.role_id = role_id
self.principal_type = principal_type
class UserPrincipal:
def __init__(self, principal_id, mail):
self.principal_id = principal_id
self.mail = mail
class GroupPrincipal:
def __init__(self, principal_id, members):
self.principal_id = principal_id
self.members = members
def test_extract_workspace__raises_error_if_client_id_not_available():
access_service = AzureADAuthorization()
with pytest.raises(AuthConfigValidationError):
access_service.extract_workspace_auth_information(data={"auth_type": "Manual"})
@patch(
"services.aad_authentication.AzureADAuthorization._get_app_auth_info",
return_value={"app_role_id_workspace_researcher": "1234"},
)
def test_extract_workspace__raises_error_if_owner_not_in_roles(get_app_auth_info_mock):
access_service = AzureADAuthorization()
with pytest.raises(AuthConfigValidationError):
access_service.extract_workspace_auth_information(data={"client_id": "1234"})
@patch(
"services.aad_authentication.AzureADAuthorization._get_app_auth_info",
return_value={"app_role_id_workspace_owner": "1234"},
)
def test_extract_workspace__raises_error_if_researcher_not_in_roles(
get_app_auth_info_mock,
):
access_service = AzureADAuthorization()
with pytest.raises(AuthConfigValidationError):
access_service.extract_workspace_auth_information(data={"client_id": "1234"})
@patch(
"services.aad_authentication.AzureADAuthorization._get_app_sp_graph_data",
return_value={},
)
def test_extract_workspace__raises_error_if_graph_data_is_invalid(
get_app_sp_graph_data_mock,
):
access_service = AzureADAuthorization()
with pytest.raises(AuthConfigValidationError):
access_service.extract_workspace_auth_information(data={"client_id": "1234"})
@patch("services.aad_authentication.AzureADAuthorization._get_app_sp_graph_data")
def test_extract_workspace__returns_sp_id_and_roles(get_app_sp_graph_data_mock):
get_app_sp_graph_data_mock.return_value = {
"value": [
{
"id": "12345",
"appRoles": [
{"id": "1abc3", "value": "WorkspaceResearcher"},
{"id": "1abc4", "value": "WorkspaceOwner"},
{"id": "1abc5", "value": "AirlockManager"},
],
"servicePrincipalNames": ["api://tre_ws_1234"],
}
]
}
expected_auth_info = {
"sp_id": "12345",
"scope_id": "api://tre_ws_1234",
"app_role_id_workspace_owner": "1abc4",
"app_role_id_workspace_researcher": "1abc3",
"app_role_id_workspace_airlock_manager": "1abc5",
}
access_service = AzureADAuthorization()
actual_auth_info = access_service.extract_workspace_auth_information(
data={"auth_type": "Manual", "client_id": "1234"}
)
assert actual_auth_info == expected_auth_info
@pytest.mark.parametrize(
"user, workspace, expected_role",
[
# user not a member of the workspace app
(
User(
roleAssignments=[RoleAssignment(resource_id="ab123", role_id="ab124")],
id="123",
name="test",
email="[email protected]",
),
Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
properties={
"client_id": "1234",
"sp_id": "abc127",
"app_role_id_workspace_owner": "abc128",
"app_role_id_workspace_researcher": "abc129",
"app_role_id_workspace_airlock_manager": "abc130",
},
),
WorkspaceRole.NoRole,
),
# user is member of the workspace app but not in role
(
User(
roleAssignments=[RoleAssignment(resource_id="ab127", role_id="ab124")],
id="123",
name="test",
email="[email protected]",
),
Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
properties={
"client_id": "1234",
"sp_id": "abc127",
"app_role_id_workspace_owner": "abc128",
"app_role_id_workspace_researcher": "abc129",
"app_role_id_workspace_airlock_manager": "abc130",
},
),
WorkspaceRole.NoRole,
),
# user has owner role in workspace
(
User(
roleAssignments=[
RoleAssignment(resource_id="abc127", role_id="abc128")
],
id="123",
name="test",
email="[email protected]",
),
Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
properties={
"client_id": "1234",
"sp_id": "abc127",
"app_role_id_workspace_owner": "abc128",
"app_role_id_workspace_researcher": "abc129",
"app_role_id_workspace_airlock_manager": "abc130",
},
),
WorkspaceRole.Owner,
),
# user has researcher role in workspace
(
User(
roleAssignments=[
RoleAssignment(resource_id="abc127", role_id="abc129")
],
id="123",
name="test",
email="[email protected]",
),
Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
properties={
"client_id": "1234",
"sp_id": "abc127",
"app_role_id_workspace_owner": "abc128",
"app_role_id_workspace_researcher": "abc129",
"app_role_id_workspace_airlock_manager": "abc130",
},
),
WorkspaceRole.Researcher,
),
# user has airlock manager role in workspace
(
User(
roleAssignments=[
RoleAssignment(resource_id="abc127", role_id="abc130")
],
id="123",
name="test",
email="[email protected]",
),
Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
properties={
"client_id": "1234",
"sp_id": "abc127",
"app_role_id_workspace_owner": "abc128",
"app_role_id_workspace_researcher": "abc129",
"app_role_id_workspace_airlock_manager": "abc130",
},
),
WorkspaceRole.AirlockManager,
),
],
)
@patch("services.aad_authentication.AzureADAuthorization.get_identity_role_assignments")
def test_get_workspace_role_returns_correct_owner(
get_identity_role_assignments_mock,
user: User,
workspace: Workspace,
expected_role: WorkspaceRole,
):
get_identity_role_assignments_mock.return_value = user.roleAssignments
access_service = AzureADAuthorization()
actual_role = access_service.get_workspace_role(
user, workspace, access_service.get_identity_role_assignments(user.id)
)
assert actual_role == expected_role
@patch(
"services.aad_authentication.AzureADAuthorization.get_identity_role_assignments",
return_value=[("ab123", "ab124")],
)
def test_raises_auth_config_error_if_workspace_auth_config_is_not_set(_):
access_service = AzureADAuthorization()
user = User(id="123", name="test", email="[email protected]")
workspace_with_no_auth_config = Workspace(
id="abc",
etag="",
templateName="template-name",
templateVersion="0.1.0",
resourcePath="test",
)
with pytest.raises(AuthConfigValidationError):
_ = access_service.get_workspace_role(
user,
workspace_with_no_auth_config,
access_service.get_identity_role_assignments(user.id),
)
@patch(
"services.aad_authentication.AzureADAuthorization.get_identity_role_assignments",
return_value=[("ab123", "ab124")],
)
def test_raises_auth_config_error_if_auth_info_has_incorrect_roles(_):
access_service = AzureADAuthorization()
user = User(id="123", name="test", email="[email protected]")
workspace_with_auth_info_but_no_roles = Workspace(
id="abc",
templateName="template-name",
templateVersion="0.1.0",
etag="",
properties={"sp_id": "123", "roles": {}},
resourcePath="test",
)
with pytest.raises(AuthConfigValidationError):
_ = access_service.get_workspace_role(
user,
workspace_with_auth_info_but_no_roles,
            access_service.get_identity_role_assignments(user.id),
)
@patch("services.aad_authentication.AzureADAuthorization._get_user_role_assignments")
@patch("services.aad_authentication.AzureADAuthorization._get_user_emails")
@patch(
"services.aad_authentication.AzureADAuthorization._get_msgraph_token",
return_value="token",
)
def test_get_workspace_role_assignment_details_with_single_user_returns_user_mail_and_role_assignment(
_, users, roles
):
access_service = AzureADAuthorization()
# Build user response
user_principal_id = "user_principal_id"
user_email = "[email protected]"
user_response = get_mock_batch_response(
[UserPrincipal(user_principal_id, user_email)], []
)
users.return_value = user_response
# Build user role assignment response
workspace_owner_role_id = "1234"
roles_response = get_mock_role_response(
[PrincipalRole(user_principal_id, workspace_owner_role_id, "User")]
)
roles.return_value = roles_response
# Act
role_assignment_details = access_service.get_workspace_role_assignment_details(
Workspace(
id="id",
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"sp_id": "ab123",
"app_role_id_workspace_owner": workspace_owner_role_id,
"app_role_id_workspace_researcher": "ab125",
"app_role_id_workspace_airlock_manager": "ab130",
},
)
)
assert role_assignment_details["WorkspaceOwner"] == [user_email]
@patch("services.aad_authentication.AzureADAuthorization._get_user_role_assignments")
@patch("services.aad_authentication.AzureADAuthorization._get_user_emails")
@patch(
"services.aad_authentication.AzureADAuthorization._get_msgraph_token",
return_value="token",
)
def test_get_workspace_role_assignment_details_with_single_user_with_no_mail_is_not_returned(
_, users, roles
):
access_service = AzureADAuthorization()
# Build user response
user_principal_id = "user_principal_id"
user_response = get_mock_batch_response(
[UserPrincipal(user_principal_id, None)], []
)
users.return_value = user_response
# Build user role assignment response
workspace_owner_role_id = "1234"
roles_response = get_mock_role_response(
[PrincipalRole(user_principal_id, workspace_owner_role_id, "User")]
)
roles.return_value = roles_response
# Act
role_assignment_details = access_service.get_workspace_role_assignment_details(
Workspace(
id="id",
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"sp_id": "ab123",
"app_role_id_workspace_owner": workspace_owner_role_id,
"app_role_id_workspace_researcher": "ab125",
"app_role_id_workspace_airlock_manager": "ab130",
},
)
)
assert len(role_assignment_details) == 0
@patch("services.aad_authentication.AzureADAuthorization._get_user_role_assignments")
@patch("services.aad_authentication.AzureADAuthorization._get_user_emails")
@patch(
"services.aad_authentication.AzureADAuthorization._get_msgraph_token",
return_value="token",
)
def test_get_workspace_role_assignment_details_with_only_groups_assigned_returns_group_members(
_, users_and_groups, roles
):
access_service = AzureADAuthorization()
# Build group response
user_in_group = UserPrincipal("user_principal_id1", "[email protected]")
group_principal_id = "group_principal_id"
group_response = get_mock_batch_response(
[], [GroupPrincipal(group_principal_id, members=[user_in_group])]
)
users_and_groups.return_value = group_response
# Build user role assignment response
workspace_owner_role_id = "1234"
roles_response = get_mock_role_response(
[PrincipalRole(group_principal_id, workspace_owner_role_id, "Group")]
)
roles.return_value = roles_response
# Act
role_assignment_details = access_service.get_workspace_role_assignment_details(
Workspace(
id="id",
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"sp_id": "ab123",
"app_role_id_workspace_owner": workspace_owner_role_id,
"app_role_id_workspace_researcher": "ab125",
"app_role_id_workspace_airlock_manager": "ab130",
},
)
)
assert len(role_assignment_details) == 1
assert "[email protected]" in role_assignment_details["WorkspaceOwner"]
@patch("services.aad_authentication.AzureADAuthorization._get_user_role_assignments")
@patch("services.aad_authentication.AzureADAuthorization._get_user_emails")
@patch(
"services.aad_authentication.AzureADAuthorization._get_msgraph_token",
return_value="token",
)
def test_get_workspace_role_assignment_details_with_group_with_multiple_users_returned_as_expected(
_, users_and_groups, roles
):
access_service = AzureADAuthorization()
# Build group response
user_principal_id1 = "user_principal_id1"
user_email1 = "[email protected]"
user_principal_id2 = "user_principal_id2"
user_email2 = "[email protected]"
group_principal = GroupPrincipal(
"group_principal_id",
members=[
UserPrincipal(user_principal_id1, user_email1),
UserPrincipal(user_principal_id2, user_email2),
],
)
# Get batch response
users_groups_response = get_mock_batch_response([], [group_principal])
users_and_groups.return_value = users_groups_response
# Build user role assignment response
workspace_owner_role_id = "1234"
roles_response = get_mock_role_response(
[PrincipalRole(group_principal.principal_id, workspace_owner_role_id, "Group")]
)
roles.return_value = roles_response
# Act
role_assignment_details = access_service.get_workspace_role_assignment_details(
Workspace(
id="id",
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"sp_id": "ab123",
"app_role_id_workspace_owner": workspace_owner_role_id,
"app_role_id_workspace_researcher": "ab125",
"app_role_id_workspace_airlock_manager": "ab130",
},
)
)
assert len(role_assignment_details) == 1
assert "[email protected]" in role_assignment_details["WorkspaceOwner"]
assert "[email protected]" in role_assignment_details["WorkspaceOwner"]
@patch("services.aad_authentication.AzureADAuthorization._get_user_role_assignments")
@patch("services.aad_authentication.AzureADAuthorization._get_user_emails")
@patch(
"services.aad_authentication.AzureADAuthorization._get_msgraph_token",
return_value="token",
)
def test_get_workspace_role_assignment_details_with_groups_and_users_assigned_returned_as_expected(
_, users_and_groups, roles
):
access_service = AzureADAuthorization()
# Build group response
user_principal_id1 = "user_principal_id1"
user_email1 = "[email protected]"
group_principal = GroupPrincipal(
"group_principal_id", members=[UserPrincipal(user_principal_id1, user_email1)]
)
# User assigned to the role, not in any group
user_principal_id2 = "user_principal_id2"
user_email2 = "[email protected]"
# Get batch response
users_groups_response = get_mock_batch_response(
[UserPrincipal(user_principal_id2, user_email2)], [group_principal]
)
users_and_groups.return_value = users_groups_response
# Build user role assignment response
workspace_owner_role_id = "1234"
roles_response = get_mock_role_response(
[
PrincipalRole(user_principal_id1, workspace_owner_role_id, "User"),
PrincipalRole(user_principal_id2, workspace_owner_role_id, "User"),
PrincipalRole(
group_principal.principal_id, workspace_owner_role_id, "Group"
),
]
)
roles.return_value = roles_response
# Act
role_assignment_details = access_service.get_workspace_role_assignment_details(
Workspace(
id="id",
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"sp_id": "ab123",
"app_role_id_workspace_owner": workspace_owner_role_id,
"app_role_id_workspace_researcher": "ab125",
"app_role_id_workspace_airlock_manager": "ab130",
},
)
)
assert len(role_assignment_details) == 1
assert "[email protected]" in role_assignment_details["WorkspaceOwner"]
assert "[email protected]" in role_assignment_details["WorkspaceOwner"]
@patch("services.aad_authentication.AzureADAuthorization._get_auth_header")
@patch("services.aad_authentication.AzureADAuthorization._get_batch_users_by_role_assignments_body")
@patch("requests.post")
def test_get_user_emails_with_batch_of_more_than_20_requests(mock_graph_post, mock_get_batch_users_by_role_assignments_body, mock_headers):
# Arrange
access_service = AzureADAuthorization()
roles_graph_data = [{"id": "role1"}, {"id": "role2"}]
msgraph_token = "token"
batch_endpoint = access_service._get_batch_endpoint()
# mock the response of _get_auth_header
headers = {"Authorization": f"Bearer {msgraph_token}"}
mock_headers.return_value = headers
headers["Content-type"] = "application/json"
# mock the response of the get batch request for 30 users
batch_request_body_first_20 = {
"requests": [
{"id": f"{i}", "method": "GET", "url": f"/users/{i}"} for i in range(20)
]
}
batch_request_body_last_10 = {
"requests": [
{"id": f"{i}", "method": "GET", "url": f"/users/{i}"} for i in range(20, 30)
]
}
batch_request_body = {
"requests": [
{"id": f"{i}", "method": "GET", "url": f"/users/{i}"} for i in range(30)
]
}
mock_get_batch_users_by_role_assignments_body.return_value = batch_request_body
# Mock the response of the post request
mock_graph_post_response = {"responses": [{"id": "user1"}, {"id": "user2"}]}
mock_graph_post.return_value.json.return_value = mock_graph_post_response
# Act
users_graph_data = access_service._get_user_emails(roles_graph_data, msgraph_token)
# Assert
assert len(users_graph_data["responses"]) == 4
calls = [
call(
f"{batch_endpoint}",
json=batch_request_body_first_20,
headers=headers
),
call(
f"{batch_endpoint}",
json=batch_request_body_last_10,
headers=headers
)
]
mock_graph_post.assert_has_calls(calls, any_order=True)
def get_mock_batch_response(user_principals, group_principals):
response_body = {"responses": []}
for user_principal in user_principals:
response_body["responses"].append(
get_mock_user_response(user_principal.principal_id, user_principal.mail)
)
for group_principal in group_principals:
response_body["responses"].append(get_mock_group_response(group_principal))
return response_body
def get_mock_user_response(principal_id, mail):
headers = '{"Cache-Control":"no-cache","x-ms-resource-unit":"1","OData-Version":"4.0","Content-Type":"application/json;odata.metadata=minimal;odata.streaming=true;IEEE754Compatible=false;charset=utf-8"}'
user_odata = f'@odata.context":"{MOCK_MICROSOFT_GRAPH_URL}/v1.0/$metadata#users(mail,id)/$entity'
user_response_body = {
"id": "1",
"status": 200,
"headers": headers,
"body": {"@odata.context": user_odata, "mail": mail, "id": principal_id},
}
return user_response_body
def get_mock_group_response(group):
headers = '{"Cache-Control":"no-cache","x-ms-resource-unit":"1","OData-Version":"4.0","Content-Type":"application/json;odata.metadata=minimal;odata.streaming=true;IEEE754Compatible=false;charset=utf-8"}'
group_odata = f"{MOCK_MICROSOFT_GRAPH_URL}/v1.0/$metadata#directoryObjects(mail,id)"
group_members_body = []
for member in group.members:
group_members_body.append(
{
"@odata.type": "#microsoft.graph.user",
"mail": member.mail,
"id": member.principal_id,
}
)
group_response_body = {
"id": group.principal_id,
"status": 200,
"headers": headers,
"body": {"@odata.context": group_odata, "value": group_members_body},
}
return group_response_body
def get_mock_role_response(principal_roles):
odata_context = f'@odata.context":"{MOCK_MICROSOFT_GRAPH_URL}/v1.0/$metadata#servicePrincipals(workspace-client-id))/appRoleAssignedTo(appRoleId,principalId,principalType)'
response = {"@odata.context": odata_context, "value": []}
for principal_role in principal_roles:
response["value"].append(
{
"appRoleId": principal_role.role_id,
"principalId": principal_role.principal_id,
"principalType": principal_role.principal_type,
}
)
return response
|
AzureTRE/api_app/tests_ma/test_services/test_aad_access_service.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_services/test_aad_access_service.py",
"repo_id": "AzureTRE",
"token_count": 10999
}
| 95 |
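The last test in the file above hinges on the Microsoft Graph `$batch` endpoint accepting at most 20 requests per call, which is why a 30-user lookup is expected to be sent as two POSTs (20 requests, then 10). A minimal sketch of that chunking step, with illustrative names only (not the actual `_get_user_emails` implementation):

from typing import Any, Dict, List


def chunk_batch_requests(requests: List[Dict[str, Any]], max_per_batch: int = 20) -> List[Dict[str, Any]]:
    # Split a Graph $batch body into bodies of at most max_per_batch requests each.
    return [
        {"requests": requests[i:i + max_per_batch]}
        for i in range(0, len(requests), max_per_batch)
    ]


# 30 user lookups become two batch bodies: 20 requests, then 10.
requests = [{"id": f"{i}", "method": "GET", "url": f"/users/{i}"} for i in range(30)]
assert [len(b["requests"]) for b in chunk_batch_requests(requests)] == [20, 10]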
from logging import Logger
import sys
from time import sleep
import click
from tre.api_client import ApiClient
from tre.output import output
def get_operation_id_completion(ctx: click.Context, log: Logger, list_url: str, param: click.Parameter, incomplete: str, scope_id: str = None):
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', list_url, scope_id=scope_id)
if response.is_success:
ids = [workspace["id"] for workspace in response.json()["operations"]]
return [id for id in ids if id.startswith(incomplete)]
def is_operation_state_terminal(state: str) -> bool:
# In the absence of a field on the operation indicating whether it is completed or not,
# we maintain a list here.
# Note that we test against 'active' states
# This way, a new state will be considered terminal (and not a success)
    # so we avoid a case where waiting for completion continues indefinitely
# when there is a new state (and we return a non-successful status to
# highlight it)
return state not in [
'deleting',
'deploying',
'awaiting_action',
'invoking_action',
'pipeline_deploying',
'pipeline_running',
'not_deployed',
'awaiting_deployment',
'awaiting_deletion',
'awaiting_update',
'updating'
]
def is_operation_state_success(state: str) -> bool:
return state in [
'deleted',
'deployed',
'action_succeeded',
'pipeline_succeeded',
'updated'
]
def default_operation_table_query_list():
return r"operations[].{id:id, status:status, action:action, resourcePath:resourcePath, message:message}"
def default_operation_table_query_single():
return r"operation.{id:id, status:status, action:action, resourcePath:resourcePath, message:message}"
def operation_show(log, operation_url, no_wait, output_format, query, suppress_output: bool = False, scope_id: str = None):
wait_for_completion = not no_wait
client = ApiClient.get_api_client_from_config()
response = client.call_api(
log,
'GET',
operation_url,
scope_id=scope_id
)
response_json = response.json()
action = response_json['operation']['action']
state = response_json['operation']['status']
while wait_for_completion and not is_operation_state_terminal(state):
click.echo(f'Operation state: {state} (action={action})',
err=True, nl=False)
sleep(5)
click.echo(' - refreshing...', err=True)
response = client.call_api(
log,
'GET',
operation_url,
scope_id=scope_id
)
response_json = response.json()
action = response_json['operation']['action']
state = response_json['operation']['status']
if not suppress_output or not response.is_success:
output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single())
if wait_for_completion and not is_operation_state_success(state):
sys.exit(1)
return response.text
def operations_list(log, operations_url, output_format, query, scope_id: str = None):
client = ApiClient.get_api_client_from_config()
response = client.call_api(
log,
'GET',
operations_url,
scope_id=scope_id
)
output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_list())
|
AzureTRE/cli/tre/commands/operation.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/operation.py",
"repo_id": "AzureTRE",
"token_count": 1387
}
| 96 |
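The CLI module above polls an operation until `is_operation_state_terminal()` reports a terminal state and then uses `is_operation_state_success()` to choose the exit code. Below is a stripped-down sketch of that polling pattern, assuming the `tre` CLI package is importable; `fetch_status` and the 5-second interval are placeholders rather than the real API:

import time
from typing import Callable

from tre.commands.operation import is_operation_state_success, is_operation_state_terminal


def poll_operation(fetch_status: Callable[[], str], interval_seconds: int = 5) -> bool:
    # Poll the operation status until it reaches a terminal state; True means success.
    state = fetch_status()
    while not is_operation_state_terminal(state):
        time.sleep(interval_seconds)
        state = fetch_status()
    return is_operation_state_success(state)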
import json
import logging
import click
from tre.api_client import ApiClient
from tre.output import output, output_option, query_option
from tre.commands.workspace_service_templates.contexts import pass_workspace_service_template_context, WorkspaceServiceTemplateContext
@click.group(name="user-resource-templates", help="List user-resource-templates ")
def user_resource_templates():
pass
@click.command(name="list", help="List user-resource-templates")
@output_option()
@query_option()
@pass_workspace_service_template_context
def user_resource_templates_list(workspace_service_template_context: WorkspaceServiceTemplateContext, output_format, query):
log = logging.getLogger(__name__)
client = ApiClient.get_api_client_from_config()
template_name = workspace_service_template_context.template_name
if template_name is None:
raise click.UsageError('Missing workspace service name')
response = client.call_api(
log,
'GET',
f'/api/workspace-service-templates/{template_name}/user-resource-templates',
)
output(response, output_format=output_format, query=query, default_table_query=r"templates[].{name:name, title: title, description:description}")
@click.command(name="new", help="Register a new user resource template")
@click.option('--definition', help='JSON definition for the template', required=False)
@click.option('--definition-file', help='File containing JSON definition for the template', required=False, type=click.File("r"))
@output_option()
@query_option()
@pass_workspace_service_template_context
def user_resource_templates_create(workspace_service_template_context: WorkspaceServiceTemplateContext, definition, definition_file, output_format, query):
log = logging.getLogger(__name__)
if definition is None:
if definition_file is None:
raise click.UsageError('Please specify either a definition or a definition file')
definition = definition_file.read()
template_name = workspace_service_template_context.template_name
if template_name is None:
raise click.UsageError('Missing workspace service name')
definition_dict = json.loads(definition)
client = ApiClient.get_api_client_from_config()
click.echo("Registering template...", err=True)
response = client.call_api(
log,
'POST',
f'/api/workspace-service-templates/{template_name}/user-resource-templates',
json_data=definition_dict
)
output(response, output_format=output_format, query=query, default_table_query=r"{id: id, name:name, title: title, description:description}")
return response.text
user_resource_templates.add_command(user_resource_templates_list)
user_resource_templates.add_command(user_resource_templates_create)
|
AzureTRE/cli/tre/commands/workspace_service_templates/user_resource_templates/user_resource_templates.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/workspace_service_templates/user_resource_templates/user_resource_templates.py",
"repo_id": "AzureTRE",
"token_count": 894
}
| 97 |
{
"$schema": "http://json-schema.org/draft-04/schema#",
"$id": "https://github.com/microsoft/AzureTRE/schema/config_schema.json",
"title": "AzureTRE configuration file",
"description": "This document contains all the configuration needed to deploy and setup AzureTRE.",
"type": "object",
"properties": {
"location": {
"description": "The Azure location (region) for all resources.",
"type": "string",
"pattern": "[a-z]+"
},
"tre_id": {
"description": "TRE unique identifier",
"type": "string",
"pattern": "^[a-z0-9]*$",
"maxLength": 11
},
"management": {
"description": "Management configuration",
"type": "object",
"properties": {
"mgmt_resource_group_name": {
"description": "The shared resource group for all management resources, including the storage account.",
"type": "string",
"pattern": "^[-\\w\\._\\(\\)]+$",
"maxLength": 90
},
"mgmt_storage_account_name": {
"description": "The name of the storage account to hold the Terraform state and other deployment artifacts.",
"type": "string",
"pattern": "[A-Za-z09]+",
"minLength": 3,
"maxLength": 24
},
"terraform_state_container_name": {
"description": "Optional. The name of the blob container to hold the Terraform state. Default value is `tfstate`.",
"type": "string",
"pattern": "^[a-z0-9](?!.*--)[a-z0-9-]{1,61}[a-z0-9]$",
"minLength": 3,
"maxLength": 63
},
"acr_name": {
"description": "A globally unique name for the Azure Container Registry (ACR) that will be created to store deployment images.",
"type": "string",
"pattern": "^[a-zA-Z0-9]*$",
"minLength": 5,
"maxLength": 50
},
"arm_subscription_id": {
"description": "The Azure subscription ID for all resources.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
}
},
"required": [
"mgmt_resource_group_name",
"mgmt_storage_account_name",
"terraform_state_container_name",
"acr_name",
"arm_subscription_id"
]
},
"tre": {
"description": "TRE configuration",
"type": "object",
"properties": {
"enable_airlock_malware_scanning": {
"description": "Allow airlock malware scanning.",
"type": "boolean"
},
"core_address_space": {
"description": "TRE core address spaces.",
"type": "string"
},
"tre_address_space": {
"description": "TRE address spaces.",
"type": "string"
},
"enable_swagger": {
"description": "Determines whether the Swagger interface for the API will be available.",
"type": "boolean"
},
"tre_url": {
"description": "Url for the TRE environment.",
"type": "string",
"pattern": "^https?://"
}
}
},
"authentication": {
"description": "Authentication configuration",
"type": "object",
"properties": {
"aad_tenant_id": {
"description": "Tenant id against which auth is performed.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"auto_workspace_app_registration": {
"description": "This identity is used to manage other AAD applications that it owns. Read more about it here: docs/tre-admins/auth.md",
"type": "boolean"
},
"auto_workspace_group_creation": {
"description": "This identity can create security groups aligned to each applciation role. Read more about it here: docs/tre-admins/auth.md",
"type": "boolean"
},
"api_client_id": {
"description": "API application (client) ID.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"api_client_secret": {
"description": "API application client secret.",
"type": "string",
"minLength": 11
},
"swagger_ui_client_id": {
"description": "Swagger/UI application (client) ID.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"application_admin_client_id": {
"description": "This client will administer AAD Applications for TRE.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"application_admin_client_secret": {
"description": "Secret to client which will administer AAD Applications for TRE",
"type": "string",
"minLength": 11
},
"test_account_client_id": {
"description": "This is the app that will run the tests for you.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"test_account_client_secret": {
"description": "This is the secret to an app that will run the tests for you",
"type": "string",
"minLength": 11
},
"workspace_api_client_id": {
"description": "Workspace AD Application. This will be created for you for future use - when creating workspaces.",
"type": "string",
"pattern": "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"
},
"workspace_api_client_secret": {
"description": "Workspace AD Application secret. This will be created for you for future use - when creating workspaces.",
"type": "string",
"minLength": 11
}
},
"required": [
"aad_tenant_id"
]
}
},
"required": [
"location",
"tre_id"
]
}
|
AzureTRE/config_schema.json/0
|
{
"file_path": "AzureTRE/config_schema.json",
"repo_id": "AzureTRE",
"token_count": 2949
}
| 98 |
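The JSON Schema above describes what a TRE `config.yaml` must contain. One hedged way to check a config locally is to run it through `jsonschema` validation (this sketch assumes the `jsonschema` and `PyYAML` packages are available and that both files sit in the working directory):

import json

import yaml
from jsonschema import validate

with open("config_schema.json") as schema_file:
    schema = json.load(schema_file)

with open("config.yaml") as config_file:
    config = yaml.safe_load(config_file)

# Raises jsonschema.exceptions.ValidationError if, for example, tre_id exceeds
# 11 characters or acr_name breaks the alphanumeric pattern.
validate(instance=config, schema=schema)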
locals {
staticweb_storage_name = lower(replace("stweb${var.tre_id}", "-", ""))
staticweb_backend_pool_name = "beap-staticweb"
api_backend_pool_name = "beap-api"
app_path_map_name = "upm-application"
redirect_path_map_name = "upm-redirect"
insecure_frontend_port_name = "feport-insecure"
secure_frontend_port_name = "feport-secure"
frontend_ip_configuration_name = "feip-public"
api_http_setting_name = "be-htst-api"
staticweb_http_setting_name = "be-htst-staticweb"
api_probe_name = "hp-api"
insecure_listener_name = "httplstn-insecure"
secure_listener_name = "httplstn-secure"
redirect_request_routing_rule_name = "rqrt-redirect"
request_routing_rule_name = "rqrt-application"
redirect_configuration_name = "rdrcfg-tosecure"
certificate_name = "cert-primary"
tre_core_tags = {
tre_id = var.tre_id
tre_core_service_id = var.tre_id
}
appgateway_diagnostic_categories_enabled = ["ApplicationGatewayAccessLog", "ApplicationGatewayPerformanceLog", "ApplicationGatewayFirewallLog"]
}
|
AzureTRE/core/terraform/appgateway/locals.tf/0
|
{
"file_path": "AzureTRE/core/terraform/appgateway/locals.tf",
"repo_id": "AzureTRE",
"token_count": 449
}
| 99 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
# These variables are loaded in for us
# shellcheck disable=SC2154
../../devops/scripts/terraform_wrapper.sh -g "${TF_VAR_mgmt_resource_group_name}" \
-s "${TF_VAR_mgmt_storage_account_name}" \
-n "${TF_VAR_terraform_state_container_name}" \
-k "${TRE_ID}" -c "terraform destroy -auto-approve"
|
AzureTRE/core/terraform/destroy.sh/0
|
{
"file_path": "AzureTRE/core/terraform/destroy.sh",
"repo_id": "AzureTRE",
"token_count": 274
}
| 100 |
#!/bin/bash
set -e
if [ ! -f ../tre_output.json ]; then
# Connect to the remote backend of Terraform
export TF_LOG=""
# shellcheck disable=SC2154
terraform init -input=false -backend=true -reconfigure \
-backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \
-backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \
-backend-config="container_name=$TF_VAR_terraform_state_container_name" \
-backend-config="key=${TRE_ID}"
# Convert the output to json
terraform output -json > ../tre_output.json
fi
# Now create an .env file
./json-to-env.sh < ../tre_output.json > ../private.env
# Pull in the core templates environment variables so we can build up new key/value pairs
if [ -f ../.env ]; then
# shellcheck disable=SC1091
source ../.env
fi
# Add a few extra values to the file to help us (i.e. for local debugging api_app and resource processor)
# shellcheck disable=SC2129
echo "TEST_WORKSPACE_APP_ID='${WORKSPACE_API_CLIENT_ID}'" >> ../private.env
echo "TEST_WORKSPACE_APP_SECRET='${WORKSPACE_API_CLIENT_SECRET}'" >> ../private.env
# These next ones from Check Dependencies
echo "SUBSCRIPTION_ID='${SUB_ID}'" >> ../private.env
echo "AZURE_SUBSCRIPTION_ID='${SUB_ID}'" >> ../private.env
echo "AZURE_TENANT_ID='${TENANT_ID}'" >> ../private.env
|
AzureTRE/core/terraform/outputs.sh/0
|
{
"file_path": "AzureTRE/core/terraform/outputs.sh",
"repo_id": "AzureTRE",
"token_count": 487
}
| 101 |
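The `outputs.sh` script above pipes `terraform output -json` through `json-to-env.sh` to build `private.env`. A rough Python equivalent of that conversion, assuming the standard `terraform output -json` shape where each output maps to an object with a `value` key (the real shell script may differ, for example in how it cases or quotes keys):

import json
import sys

tf_outputs = json.load(sys.stdin)
for name, output in tf_outputs.items():
    print(f"{name.upper()}='{output['value']}'")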
#!/bin/bash
set -euo pipefail
# Use this for debug only
# set -o xtrace
# AZURE_CORE_OUTPUT=jsonc # force CLI output to JSON for the script (user can still change default for interactive usage in the dev container)
function show_usage()
{
cat << USAGE
Utility script for creating app registrations required by Azure TRE. This script will create the API and Client
Applications. The Client Application is the public facing app, whereas the API is an internal AAD Application.
You must be logged in using Azure CLI with sufficient privileges to modify Azure Active Directory to run this script.
Usage: $0 -n <app-name> [-u <tre-url>] [-a] [-r 1] [-t <automation-client-id>]
Options:
-n,--name Required. The prefix for the app (registration) names e.g., "TRE", or "Workspace One".
-u,--tre-url TRE URL, used to construct auth redirection URLs for the UI and Swagger app.
-a,--admin-consent Optional, but recommended. Grants admin consent for the app registrations, when this flag is set.
Requires directory admin privileges to the Azure AD in question.
  -t,--automation-clientid Optional. The client ID of an automation account that should be granted access to the TRE API.
-r,--reset-password Optional, switch to automatically reset the password. Default 0
Examples:
1. $0 -n TRE -u https://mytre.region.cloudapp.azure.com -a
Using an Automation account
2. $0 --name 'TRE' --tre-url https://mytre.region.cloudapp.azure.com --admin-consent --automation-clientid <automation-client-id>
USAGE
exit 2
}
if ! command -v az &> /dev/null; then
echo "This script requires Azure CLI" 1>&2
exit 1
fi
if [[ $(az account list --only-show-errors -o json | jq 'length') -eq 0 ]]; then
echo "Please run az login -t <tenant> --allow-no-subscriptions"
exit 1
fi
# Get the directory that this script is in
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
declare resetPassword=0
declare grantAdminConsent=0
declare appName=""
declare appId=""
declare uxAppName=""
declare uxAppId=""
declare treUrl=""
declare currentUserId=""
declare automationAppId=""
declare automationAppObjectId=""
declare msGraphUri=""
declare spPassword=""
# Initialize parameters specified from command line
while [[ $# -gt 0 ]]; do
case "$1" in
-n|--name)
appName=$2
shift 2
;;
-a|--admin-consent)
grantAdminConsent=1
shift 1
;;
-u|--tre-url)
treUrl=$2
shift 2
;;
-t|--automation-clientid)
automationAppId=$2
shift 2
;;
-r|--reset-password)
resetPassword=$2
shift 2
;;
*)
echo "Invalid option: $1."
show_usage
;;
esac
done
###################################
# CHECK INCOMING PARAMETERS  #
###################################
if [[ -z "$appName" ]]; then
echo "Please specify the application name" 1>&2
show_usage
fi
uxAppName="$appName UX"
appName="$appName API"
currentUserId=$(az ad signed-in-user show --query 'id' --output tsv --only-show-errors)
msGraphUri="$(az cloud show --query endpoints.microsoftGraphResourceId --output tsv)/v1.0"
tenant=$(az rest -m get -u "${msGraphUri}/domains" -o json | jq -r '.value[] | select(.isDefault == true) | .id')
echo -e "\e[96mCreating the API/UX Application in the \"${tenant}\" Azure AD tenant.\e[0m"
# Load in helper functions
# shellcheck disable=SC1091
source "${DIR}/get_existing_app.sh"
# shellcheck disable=SC1091
source "${DIR}/grant_admin_consent.sh"
# shellcheck disable=SC1091
source "${DIR}/wait_for_new_app_registration.sh"
# shellcheck disable=SC1091
source "${DIR}/create_or_update_service_principal.sh"
# shellcheck disable=SC1091
source "${DIR}/get_msgraph_access.sh"
# shellcheck disable=SC1091
source "${DIR}/update_resource_access.sh"
# Generate GUIDS
userRoleId=$(cat /proc/sys/kernel/random/uuid)
adminRoleId=$(cat /proc/sys/kernel/random/uuid)
userImpersonationScopeId=$(cat /proc/sys/kernel/random/uuid)
appObjectId=""
# Get an existing object if it's been created before.
existingApp=$(get_existing_app --name "${appName}")
if [[ -n ${existingApp} ]]; then
appObjectId=$(echo "${existingApp}" | jq -r '.id')
userRoleId=$(echo "$existingApp" | jq -r '.appRoles[] | select(.value == "TREUser").id')
adminRoleId=$(echo "$existingApp" | jq -r '.appRoles[] | select(.value == "TREAdmin").id')
userImpersonationScopeId=$(echo "$existingApp" | jq -r '.api.oauth2PermissionScopes[] | select(.value == "user_impersonation").id')
if [[ -z "${userRoleId}" ]]; then userRoleId=$(cat /proc/sys/kernel/random/uuid); fi
if [[ -z "${adminRoleId}" ]]; then adminRoleId=$(cat /proc/sys/kernel/random/uuid); fi
if [[ -z "${userImpersonationScopeId}" ]]; then userImpersonationScopeId=$(cat /proc/sys/kernel/random/uuid); fi
fi
msGraphAppId="00000003-0000-0000-c000-000000000000"
msGraphObjectId=$(az ad sp show --id ${msGraphAppId} --query "id" --output tsv --only-show-errors)
roleUserReadAll=$(get_msgraph_role "User.Read.All" )
roleDirectoryReadAll=$(get_msgraph_role "Directory.Read.All" )
scope_email=$(get_msgraph_scope "email")
scope_profile=$(get_msgraph_scope "profile")
scope_openid=$(get_msgraph_scope "openid")
scope_offline_access=$(get_msgraph_scope "offline_access")
appDefinition=$(jq -c . << JSON
{
"displayName": "${appName}",
"api": {
"requestedAccessTokenVersion": 2,
"oauth2PermissionScopes": [
{
"adminConsentDescription": "Allow the app to access the TRE API on behalf of the signed-in user.",
"adminConsentDisplayName": "Access the TRE API on behalf of signed-in user",
"id": "${userImpersonationScopeId}",
"isEnabled": true,
"type": "User",
"userConsentDescription": "Allow the app to access the TRE API on your behalf.",
"userConsentDisplayName": "Access the TRE API",
"value": "user_impersonation"
}
]
},
"appRoles": [
{
"id": "${userRoleId}",
"allowedMemberTypes": [ "User", "Application" ],
"description": "Provides access to the ${appName} application.",
"displayName": "TRE Users",
"isEnabled": true,
"origin": "Application",
"value": "TREUser"
},
{
"id": "${adminRoleId}",
"allowedMemberTypes": [ "User", "Application" ],
"description": "Provides resource administrator access to the ${appName}.",
"displayName": "TRE Administrators",
"isEnabled": true,
"origin": "Application",
"value": "TREAdmin"
}
],
"signInAudience": "AzureADMyOrg",
"requiredResourceAccess": [
{
"resourceAppId": "${msGraphAppId}",
"resourceAccess": [
${roleUserReadAll},
${roleDirectoryReadAll},
$scope_email,
$scope_openid,
$scope_profile
]
}
]
}
JSON
)
# Is the app already registered?
if [[ -n ${appObjectId} ]]; then
echo "Updating \"${appName}\" app registration (ObjectId: \"${appObjectId}\")"
az rest --method PATCH --uri "${msGraphUri}/applications/${appObjectId}" --headers Content-Type=application/json --body "${appDefinition}"
appId=$(az ad app show --id "${appObjectId}" --query "appId" --output tsv --only-show-errors)
echo "Updated \"${appName}\" app registration (AppId: \"${appId}\")"
else
echo "Creating \"${appName}\" app registration."
appId=$(az rest --method POST --uri "${msGraphUri}/applications" --headers Content-Type=application/json --body "${appDefinition}" --output tsv --query "appId")
# Poll until the app registration is found in the listing.
wait_for_new_app_registration "${appId}"
az ad app update --id "${appId}" --identifier-uris "api://${appId}" --only-show-errors
fi
# Make the current user an owner of the application.
az ad app owner add --id "${appId}" --owner-object-id "$currentUserId" --only-show-errors
# Create a Service Principal for the app.
spPassword=$(create_or_update_service_principal "${appId}" "${resetPassword}")
spId=$(az ad sp list --filter "appId eq '${appId}'" --query '[0].id' --output tsv --only-show-errors)
# needed to make the API permissions change effective, this must be done after SP creation...
echo
echo "Running 'az ad app permission grant' to make changes effective."
az ad app permission grant --id "$spId" --api "$msGraphObjectId" --scope "email openid profile" --only-show-errors
# Grant admin consent on the required resource accesses (Graph API)
if [[ $grantAdminConsent -eq 1 ]]; then
echo "Granting admin consent for '${appName}' app (service principal ID ${spId}) - NOTE: Directory admin privileges required for this step"
directoryReadAllId=$(az ad sp show --id ${msGraphAppId} --query "appRoles[?value=='Directory.Read.All'].id" --output tsv --only-show-errors)
grant_admin_consent "${spId}" "$msGraphObjectId" "${directoryReadAllId}"
userReadAllId=$(az ad sp show --id ${msGraphAppId} --query "appRoles[?value=='User.Read.All'].id" --output tsv --only-show-errors)
grant_admin_consent "${spId}" "${msGraphObjectId}" "${userReadAllId}"
fi
# Create the UX App Registration
redirectUris="\"http://localhost:8000/api/docs/oauth2-redirect\", \"http://localhost:3000\""
if [[ -n ${treUrl} ]]; then
echo "Adding reply/redirect URL \"${treUrl}\" to \"${appName}\""
redirectUris="${redirectUris}, \"${treUrl}\", \"${treUrl}/api/docs/oauth2-redirect\""
fi
uxAppDefinition=$(jq -c . << JSON
{
"displayName": "${uxAppName}",
"signInAudience": "AzureADMyOrg",
"requiredResourceAccess": [
{
"resourceAppId": "${msGraphAppId}",
"resourceAccess": [
${scope_openid},
${scope_offline_access}
]
},
{
"resourceAppId": "${appId}",
"resourceAccess": [
{
"id": "${userImpersonationScopeId}",
"type": "Scope"
}
]
}
],
"spa": {
"redirectUris": [
${redirectUris}
]
}
}
JSON
)
# Is the UX app already registered?
existingUXApp=$(get_existing_app --name "${uxAppName}")
if [[ -n ${existingUXApp} ]]; then
uxObjectId=$(echo "${existingUXApp}" | jq -r '.id')
echo "Updating \"${uxAppName}\" with ObjectId \"${uxObjectId}\""
az rest --method PATCH --uri "${msGraphUri}/applications/${uxObjectId}" --headers Content-Type=application/json --body "${uxAppDefinition}"
uxAppId=$(az ad app show --id "${uxObjectId}" --query "appId" --output tsv --only-show-errors)
echo "UX app registration with AppId \"${uxAppId}\" updated."
else
echo "Creating \"${uxAppName}\" app registration."
uxAppId=$(az rest --method POST --uri "${msGraphUri}/applications" --headers Content-Type=application/json --body "${uxAppDefinition}" --output tsv --query "appId")
# Poll until the app registration is found in the listing.
wait_for_new_app_registration "${uxAppId}"
fi
# See if a service principal already exists
uxSpId=$(az ad sp list --filter "appId eq '${uxAppId}'" --query '[0].id' --output tsv --only-show-errors)
# If not, create a new service principal
if [[ -z "$uxSpId" ]]; then
uxSpId=$(az ad sp create --id "${uxAppId}" --query 'id' --output tsv --only-show-errors)
wait_for_new_service_principal "${uxSpId}"
fi
# Make the current user an owner of the application.
az ad app owner add --id "${uxAppId}" --owner-object-id "${currentUserId}" --only-show-errors
az ad app owner add --id "${uxAppId}" --owner-object-id "${uxSpId}" --only-show-errors
echo "Granting delegated access for \"${uxAppName}\" (service principal ID ${uxSpId})"
az ad app permission grant --id "$uxSpId" --api "$msGraphObjectId" --scope "offline_access openid" --only-show-errors
az ad app permission grant --id "$uxSpId" --api "$appId" --scope "user_impersonation" --only-show-errors
if [[ -n ${automationAppId} ]]; then
existingAutomationApp=$(get_existing_app --id "${automationAppId}")
automationAppObjectId=$(echo "${existingAutomationApp}" | jq -r .id)
automationAppName=$(echo "${existingAutomationApp}" | jq -r .displayName)
echo "Found '${automationAppName}' with ObjectId: '${automationAppObjectId}'"
# This is the new API Access we require.
automationApiAccess=$(jq -c .requiredResourceAccess << JSON
{
"requiredResourceAccess": [
{
"resourceAppId": "${appId}",
"resourceAccess": [
{
"id": "${userImpersonationScopeId}",
"type": "Scope"
},
{
"id": "${adminRoleId}",
"type": "Role"
}
]
}
]
}
JSON
)
# Utility function to add the required permissions.
update_resource_access "$msGraphUri" "${automationAppObjectId}" "${appId}" "${automationApiAccess}"
# Grant admin consent for the application scopes
if [[ $grantAdminConsent -eq 1 ]]; then
echo "Granting admin consent for \"${automationAppName}\" (App ID ${automationAppId})"
automationSpId=$(az ad sp list --filter "appId eq '${automationAppId}'" --query '[0].id' --output tsv --only-show-errors)
echo "Found Service Principal \"$automationSpId\" for \"${automationAppName}\"."
grant_admin_consent "${automationSpId}" "${spId}" "${adminRoleId}"
az ad app permission grant --id "$automationSpId" --api "$appId" --scope "user_impersonation" --only-show-errors
fi
fi
# Set outputs in configuration file
yq -i ".authentication.api_client_id |= \"${appId}\"" config.yaml
yq -i ".authentication.api_client_secret |= \"${spPassword}\"" config.yaml
yq -i ".authentication.swagger_ui_client_id |= \"${uxAppId}\"" config.yaml
echo "api_client_id=\"${appId}\""
echo "api_client_secret=\"${spPassword}\""
echo "swagger_ui_client_id=\"${uxAppId}\""
if [[ $grantAdminConsent -eq 0 ]]; then
echo -e "\e[96mNOTE: Make sure the API permissions of the app registrations have admin consent granted."
echo -e "Run this script with flag -a to grant admin consent or configure the registrations in Azure Portal."
echo -e "See APP REGISTRATIONS in documentation for more information.\e[0m"
fi
|
AzureTRE/devops/scripts/aad/create_api_application.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/aad/create_api_application.sh",
"repo_id": "AzureTRE",
"token_count": 5372
}
| 102 |
#!/bin/bash
# This script cleans/deletes Azure environments created in CI.
# A resource group will be evaluated if its name starts with a specific prefix
# and is tagged with the 'ci_git_ref' tag.
# If the RG was created as part of a PR, then it will be deleted if the PR
# isn't open anymore. In all other cases (like regular branches), it will
# be deleted if the branch doesn't exist.
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
function stopEnv ()
{
local tre_rg="$1"
local tre_id=${tre_rg#"rg-"}
TRE_ID=${tre_id} devops/scripts/control_tre.sh stop
}
az config set extension.use_dynamic_install=yes_without_prompt
echo "Refs:"
git show-ref
open_prs=$(gh pr list --state open --json number,title,headRefName,updatedAt)
# Resource groups that start with a specific string and have the ci_git_ref tag whose value starts with "ref"
az group list --query "[?starts_with(name, 'rg-tre') && tags.ci_git_ref != null && starts_with(tags.ci_git_ref, 'refs')].[name, tags.ci_git_ref]" -o tsv |
while read -r rg_name rg_ref_name; do
if [[ "${rg_ref_name}" == refs/pull* ]]
then
# this rg originated from an external PR (i.e. a fork)
pr_num=${rg_ref_name//[!0-9]/}
is_open_pr=$(echo "${open_prs}" | jq -c "[ .[] | select( .number | contains(${pr_num})) ] | length")
if [ "${is_open_pr}" == "0" ]
then
echo "PR ${pr_num} (derived from ref ${rg_ref_name}) is not open. Environment in ${rg_name} will be deleted."
devops/scripts/destroy_env_no_terraform.sh --core-tre-rg "${rg_name}" --no-wait
continue
fi
# The pr is still open...
# The ci_git_ref might not contain the actual ref, but the "pull" ref. We need the actual head branch name.
head_ref=$(echo "${open_prs}" | jq -r ".[] | select (.number == ${pr_num}) | .headRefName")
# Checking when was the last commit on the branch.
last_commit_date_string=$(git for-each-ref --sort='-committerdate:iso8601' --format=' %(committerdate:iso8601)%09%(refname)' "refs/remotes/origin/${head_ref}" | cut -f1)
# updatedAt is changed on commits but probably comments as well.
# For PRs from forks we'll need this as the repo doesn't have the PR code handy.
pr_updated_at=$(echo "${open_prs}" | jq -r ".[] | select (.number == ${pr_num}) | .updatedAt")
echo "PR ${pr_num} source branch is ${head_ref}, last commit was on: ${last_commit_date_string}, last update was on: ${pr_updated_at}"
if [ -n "${last_commit_date_string}" ]; then
diff_in_hours=$(( ($(date +%s) - $(date -d "${last_commit_date_string}" +%s) )/(60*60) ))
else
diff_in_hours=$(( ($(date +%s) - $(date -d "${pr_updated_at}" +%s) )/(60*60) ))
fi
if (( diff_in_hours > BRANCH_LAST_ACTIVITY_IN_HOURS_FOR_DESTROY )); then
echo "No recent activity on ${head_ref}. Environment in ${rg_name} will be destroyed."
devops/scripts/destroy_env_no_terraform.sh --core-tre-rg "${rg_name}" --no-wait
elif (( diff_in_hours > BRANCH_LAST_ACTIVITY_IN_HOURS_FOR_STOP )); then
echo "No recent activity on ${head_ref}. Environment in ${rg_name} will be stopped."
stopEnv "${rg_name}"
fi
else
# this rg originated from an internal branch on this repo
ref_in_remote="${rg_ref_name/heads/remotes\/origin}"
if ! git show-ref -q "$ref_in_remote"
then
echo "Ref ${rg_ref_name} does not exist, and environment ${rg_name} can be deleted."
devops/scripts/destroy_env_no_terraform.sh --core-tre-rg "${rg_name}" --no-wait
else
# checking when was the last commit on the branch.
last_commit_date_string=$(git for-each-ref --sort='-committerdate:iso8601' --format=' %(committerdate:iso8601)%09%(refname)' "${ref_in_remote}" | cut -f1)
echo "Native ref is ${rg_ref_name}, last commit was on: ${last_commit_date_string}"
diff_in_hours=$(( ($(date +%s) - $(date -d "${last_commit_date_string}" +%s) )/(60*60) ))
if (( diff_in_hours > BRANCH_LAST_ACTIVITY_IN_HOURS_FOR_DESTROY )); then
echo "No recent activity on ${rg_ref_name}. Environment in ${rg_name} will be destroyed."
devops/scripts/destroy_env_no_terraform.sh --core-tre-rg "${rg_name}" --no-wait
elif (( diff_in_hours > BRANCH_LAST_ACTIVITY_IN_HOURS_FOR_STOP )); then
echo "No recent activity on ${rg_ref_name}. Environment in ${rg_name} will be stopped."
stopEnv "${rg_name}"
fi
fi
fi
done
# check if any workflows run on the main branch (except the current cleanup one)
# to prevent us deleting a workspace for which an E2E (on main) is currently running
if [[ -z $(gh api "https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs?branch=main&status=in_progress" | jq --arg name "$GITHUB_WORKFLOW" '.workflow_runs | select(.[].name != $name)') ]]
then
# if not, we can delete old workspace resource groups that were left due to errors.
az group list --query "[?starts_with(name, 'rg-${MAIN_TRE_ID}-ws-')].name" -o tsv |
while read -r rg_name; do
echo "Deleting resource group: ${rg_name}"
az group delete --yes --no-wait --name "${rg_name}"
done
else
echo "Workflows are running on the main branch, can't delete e2e workspaces."
fi
|
AzureTRE/devops/scripts/clean_ci_validation_envs.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/clean_ci_validation_envs.sh",
"repo_id": "AzureTRE",
"token_count": 1963
}
| 103 |
#!/usr/bin/env bash
prs=$(gh pr list -s open -A app/dependabot -l javascript --json headRefName | jq ".[].headRefName"| tr -d '"')
for pr in $prs
do
command="git merge upstream/$pr --no-edit"
echo "$command"
$command
done
|
AzureTRE/devops/scripts/process_prs.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/process_prs.sh",
"repo_id": "AzureTRE",
"token_count": 88
}
| 104 |
variable "mgmt_storage_account_name" {
type = string
description = "Storage account created by bootstrap to hold all Terraform state"
}
variable "mgmt_resource_group_name" {
type = string
description = "Shared management resource group"
}
variable "location" {
type = string
description = "Location used for all resources"
}
variable "acr_sku" {
type = string
default = "Standard"
description = "Price tier for ACR"
}
variable "acr_name" {
type = string
description = "Name of ACR"
}
|
AzureTRE/devops/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/devops/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 197
}
| 105 |
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Resource id.",
"format": "uuid"
},
"parentId": {
"type": "string",
"description": "Parent resource id.",
"format": "uuid"
},
"specificationVersion": {
"type": "string",
"description": "Resource specification version."
},
"ResourceVersion": {
"type": "string",
"description": "Resource version."
},
"title": {
"type": "string",
"description": "Resource title.",
"maxLength": 120
},
"description": {
"type": "string",
"description": "Resource description.",
"maxLength": 120
},
"resourceType": {
"type": "string",
"description": "Resource type.",
"enum": ["Workspace", "Service"]
},
"status": {
"type": "string",
"description": "Resource status.",
"enum": [
"NotDeployed",
"Deploying",
"Deployed",
"Deleting",
"Deleted"
]
},
"isDeleted": {
"type": "boolean",
"description": "Is the resource marked for deletion?"
},
"properties": {
"type": "array",
"description": "Resource properties.",
"items": {"$ref": "#/$defs/property" }
},
"etag": {
"type": "string",
"description": "ETag."
}
},
"required": [
"id",
"parentId",
"specificationVersion",
"resourceVersion",
"title",
"description",
"resourceType",
"status",
"isDeleted",
"properties",
"etag"
],
"additionalProperties": false,
"$defs": {
"property": {
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Property title."
},
"description": {
"type": "string",
"description": "Property description."
},
"propertyType": {
"type": "string",
"description": "Property type."
},
"default": {
"type": "string",
"description": "Property default value."
},
"required": {
"type": "boolean",
"description": "Is property required?"
},
"mutable": {
"type": "boolean",
"description": "Is value mutable?"
},
"value": {
"type": "string",
"description": "Property value."
}
},
"required": [
"title",
"description",
"propertyType",
"required",
"mutable",
"value"
]
}
}
}
|
AzureTRE/docs/schemas/resource.json/0
|
{
"file_path": "AzureTRE/docs/schemas/resource.json",
"repo_id": "AzureTRE",
"token_count": 1982
}
| 106 |
# Getting Started
This section provides the guidelines for any engineer to deploy AzureTRE. This how-to will help you get familiar with a TRE deployment and its concepts.
!!! tip
    For troubleshooting purposes, make sure to check the [Troubleshooting FAQ](../../troubleshooting-faq/index.md).
|
AzureTRE/docs/tre-admins/setup-instructions/index.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/setup-instructions/index.md",
"repo_id": "AzureTRE",
"token_count": 76
}
| 107 |
# Guacamole User Resource Service bundle (Linux)
This is a User Resource Service template. It defines a Linux-based VM to be used by TRE researchers and to be connected to using a [Guacamole server](https://guacamole.apache.org/).
It blocks all inbound and outbound traffic to the internet and allows only RDP connections from within the vnet.
## Prerequisites
- [A base workspace bundle installed](../workspaces/base.md)
- [A guacamole workspace service bundle installed](../workspace-services/guacamole.md)
|
AzureTRE/docs/tre-templates/user-resources/guacamole-linux-vm.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/user-resources/guacamole-linux-vm.md",
"repo_id": "AzureTRE",
"token_count": 136
}
| 108 |
# Azure TRE base workspace
The base workspace template is the foundation that all other workspaces and workspace services are built upon. Alternative workspace architectures could be used. However, the templates provided in this repository rely on the specific architecture of this base workspace.
The base workspace template contains the following resources:
- Virtual Network
- Storage Account
- Key Vault
- VNet Peer to Core VNet
- Network Security Group
- App Service Plan
## Workspace Configuration
When deploying a workspace the following properties need to be configured.
### Required Properties
| Property | Options | Description |
| -------- | ------- | ----------- |
| `client_id` | Valid client ID of the Workspace App Registration. | The OpenID client ID which should be submitted to the OpenID service when necessary. This value is typically provided to you by the OpenID service when OpenID credentials are generated for your application. |
| `client_secret` | Valid client secret of the Workspace App Registration. | The client secret that corresponds to the client ID above; it is generated when credentials are created for the Workspace App Registration. |
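For illustration only, the properties of a request to create a base workspace might look like the snippet below. The surrounding payload shape and any additional properties should be checked against the API documentation; all values here are placeholders.

```python
# Hypothetical values - substitute the details of your own Workspace App Registration.
workspace_payload = {
    "templateName": "tre-workspace-base",
    "properties": {
        "display_name": "My research workspace",
        "description": "Workspace for project X",
        "client_id": "<workspace app registration client ID>",
        "client_secret": "<workspace app registration client secret>",
    },
}
```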
## Azure Trusted Services
*Azure Trusted Services* are allowed to connect to both the key vault and storage account provisioned within the workspace. If this is undesirable, additional resources without this setting configured can be deployed.
Further details around which Azure services are allowed to connect can be found below:
- Key Vault: <https://docs.microsoft.com/en-us/azure/key-vault/general/overview-vnet-service-endpoints#trusted-services>
- Azure Storage: <https://docs.microsoft.com/en-us/azure/storage/common/storage-network-security?msclkid=ee4e79e4b97911eca46dae54da464d11&tabs=azure-portal#trusted-access-for-resources-registered-in-your-subscription>
|
AzureTRE/docs/tre-templates/workspaces/base.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/workspaces/base.md",
"repo_id": "AzureTRE",
"token_count": 404
}
| 109 |
# Creating Custom templates
This document will show how to create custom templates and integrate them into your CI/CD pipelines.
## Templates types
There are 3 types of templates:
1. Workspace
1. Workspace Service
1. User Resource
Read more about them [here](../../index.md#workspace)
## How to add custom templates
The AzureTRE deployment repository has directories set up for workspace, workspace service and user resource template definitions.
See the [template authoring guide](../../tre-workspace-authors/authoring-workspace-templates.md) to learn more about how to author templates.
**To add your custom templates follow the next steps:**
1. Add your template under the relevant folder (For example: if you are adding a new workspace template then place it under `/templates/workspaces` folder).
1. Use existing templates in AzureTRE as a reference.
1. Add porter configuration - AzureTRE uses [Porter](https://porter.sh/) as a solution for implementing and deploying workspaces and workspace services; learn more about how it is used in AzureTRE [here](https://microsoft.github.io/AzureTRE/tre-developers/resource-processor/#porter).
1. Add terraform scripts to set up your deployment plan.
- Define resource template in the API - follow [this readme](https://microsoft.github.io/AzureTRE/tre-admins/registering-templates/) to register your template (an illustrative example request is sketched after this list).
- Use the [AzureTRE UI](https://microsoft.github.io/AzureTRE/tre-developers/ui/) to deploy your resources
- Add your custom templates to the CI/CD workflows - in the Deploy Azure TRE Reusable workflow, make sure to add your bundles under the register_bundles and publish_bundles steps.
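As a rough sketch of the registration step referenced above (the endpoint path, token handling and template file name are assumptions to verify against the registration guide), a workspace template could be registered through the API like this:

```python
# Illustrative only: assumes an admin bearer token and a template definition JSON file.
import json

import httpx

with open("my-workspace-template.json") as template_file:
    template_definition = json.load(template_file)

response = httpx.post(
    "https://mytre.region.cloudapp.azure.com/api/workspace-templates",
    headers={"Authorization": "Bearer <admin token>"},
    json=template_definition,
)
response.raise_for_status()
print(response.json()["name"])
```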
## Publish and Register Custom templates in the CI/CD
See the [pipelines documentation](../../tre-admins/setup-instructions/cicd-deployment.md) to learn more about publishing and registering your custom templates as part of the CI/CD.
## How to Contribute to our Documentation
If you have any comments or suggestions about our documentation then you can visit our GitHub project and either raise a new issue or comment on one of the existing ones.
You can find our existing documentation issues on GitHub by clicking on the link below:
[Existing Documentation Issues](https://github.com/microsoft/AzureTRE/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation)
Or, you can raise a new issue by clicking on this link:
[Report an Issue or Make a Suggestion](https://github.com/microsoft/AzureTRE/issues/new/choose)
**Thank you for your patience and support!**
|
AzureTRE/docs/using-tre/templates/index.md/0
|
{
"file_path": "AzureTRE/docs/using-tre/templates/index.md",
"repo_id": "AzureTRE",
"token_count": 661
}
| 110 |
import pytest
from httpx import AsyncClient
from starlette import status
import config
from e2e_tests.conftest import clean_up_test_workspace, create_or_get_test_workspace
from e2e_tests.resources.workspace import get_workspace
from helpers import assert_status, get_auth_header, get_template
from resources import strings
from helpers import get_admin_token
pytestmark = pytest.mark.asyncio
workspace_templates = [
(strings.BASE_WORKSPACE)
]
workspace_templates_test_create = [
# Base workspace template is excluded as covered by other extended tests
(strings.UNRESTRICTED_WORKSPACE),
(strings.AIRLOCK_IMPORT_REVIEW_WORKSPACE)
]
@pytest.mark.smoke
@pytest.mark.parametrize("template_name", workspace_templates)
async def test_get_workspace_templates(template_name, verify) -> None:
async with AsyncClient(verify=verify) as client:
admin_token = await get_admin_token(verify)
response = await client.get(f"{config.TRE_URL}{strings.API_WORKSPACE_TEMPLATES}", headers=get_auth_header(admin_token))
template_names = [templates["name"] for templates in response.json()["templates"]]
assert (template_name in template_names), f"No {template_name} template found"
@pytest.mark.smoke
@pytest.mark.parametrize("template_name", workspace_templates)
async def test_get_workspace_template(template_name, verify) -> None:
admin_token = await get_admin_token(verify)
async with get_template(template_name, strings.API_WORKSPACE_TEMPLATES, admin_token, verify) as response:
assert_status(response, [status.HTTP_200_OK], f"Failed to GET template: {template_name}")
@pytest.mark.extended
@pytest.mark.parametrize("template_name", workspace_templates_test_create)
async def test_create_workspace_templates(template_name, verify) -> None:
workspace_path, workspace_id = await create_or_get_test_workspace(auth_type="Automatic", verify=verify, template_name=template_name)
async with AsyncClient(verify=verify) as client:
admin_token = await get_admin_token(verify=verify)
auth_headers = get_auth_header(admin_token)
workspace = await get_workspace(client, workspace_id, auth_headers)
assert workspace["deploymentStatus"] == strings.RESOURCE_STATUS_DEPLOYED
# Tear-down in a cascaded way
await clean_up_test_workspace(pre_created_workspace_id="", workspace_path=workspace_path, verify=verify)
|
AzureTRE/e2e_tests/test_workspace_templates.py/0
|
{
"file_path": "AzureTRE/e2e_tests/test_workspace_templates.py",
"repo_id": "AzureTRE",
"token_count": 833
}
| 111 |
from typing import Optional
from multiprocessing import Process
import json
import asyncio
import logging
import sys
from resources.commands import azure_acr_login_command, azure_login_command, build_porter_command, build_porter_command_for_outputs, apply_porter_credentials_sets_command
from shared.config import get_config
from resources.helpers import get_installation_id
from resources.httpserver import start_server
from shared.logging import initialize_logging, logger, shell_output_logger, tracer
from shared.config import VERSION
from resources import statuses
from contextlib import asynccontextmanager
from azure.servicebus import ServiceBusMessage, NEXT_AVAILABLE_SESSION
from azure.servicebus.exceptions import OperationTimeoutError, ServiceBusConnectionError
from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
from azure.identity.aio import DefaultAzureCredential
def set_up_config() -> Optional[dict]:
try:
config = get_config()
return config
except KeyError as e:
logger.error(f"Environment variable {e} is not set correctly...Exiting")
sys.exit(1)
@asynccontextmanager
async def default_credentials(msi_id):
"""
Context manager which yields the default credentials.
"""
credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()
yield credential
await credential.close()
async def receive_message(service_bus_client, config: dict):
"""
This method is run per process. Each process will connect to service bus and try to establish a session.
If messages are there, the process will continue to receive all the messages associated with that session.
If no messages are there, the session connection will time out, sleep, and retry.
"""
q_name = config["resource_request_queue"]
while True:
try:
logger.info("Looking for new session...")
# max_wait_time=1 -> don't hold the session open after processing of the message has finished
async with service_bus_client.get_queue_receiver(queue_name=q_name, max_wait_time=1, session_id=NEXT_AVAILABLE_SESSION) as receiver:
logger.info(f"Got a session containing messages: {receiver.session.session_id}")
async with AutoLockRenewer() as renewer:
# allow a session to be auto lock renewed for up to an hour - if it's processing a message
renewer.register(receiver, receiver.session, max_lock_renewal_duration=3600)
async for msg in receiver:
result = True
message = ""
try:
message = json.loads(str(msg))
except (json.JSONDecodeError) as e:
logger.error(f"Received bad service bus resource request message: {e}")
with tracer.start_as_current_span("receive_message") as current_span:
current_span.set_attribute("resource_id", message["id"])
current_span.set_attribute("action", message["action"])
current_span.set_attribute("step_id", message["stepId"])
current_span.set_attribute("operation_id", message["operationId"])
logger.info(f"Message received for resource_id={message['id']}, operation_id={message['operationId']}, step_id={message['stepId']}")
result = await invoke_porter_action(message, service_bus_client, config)
if result:
logger.info(f"Resource request for {message} is complete")
else:
logger.error('Message processing failed!')
logger.info(f"Message for resource_id={message['id']}, operation_id={message['operationId']} processed as {result} and marked complete.")
await receiver.complete_message(msg)
logger.info(f"Closing session: {receiver.session.session_id}")
except OperationTimeoutError:
# Timeout occurred whilst connecting to a session - this is expected and indicates no non-empty sessions are available
logger.debug("No sessions for this process. Will look again...")
except ServiceBusConnectionError:
# Occasionally there will be a transient / network-level error in connecting to SB.
logger.info("Unknown Service Bus connection error. Will retry...")
except Exception:
# Catch all other exceptions, log them via .exception to get the stack trace, sleep, and reconnect
logger.exception("Unknown exception. Will retry...")
async def run_porter(command, config: dict):
"""
Run a Porter command
"""
command = [
f"{azure_login_command(config)} && ",
f"{azure_acr_login_command(config)} && ",
f"{apply_porter_credentials_sets_command(config)} && ",
*command
]
proc = await asyncio.create_subprocess_shell(
''.join(command),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=config["porter_env"]
)
stdout, stderr = await proc.communicate()
logger.debug(f'run porter exited with {proc.returncode}')
result_stdout = None
result_stderr = None
if stdout:
result_stdout = stdout.decode()
shell_output_logger(result_stdout, '[stdout]', logging.INFO)
if stderr:
result_stderr = stderr.decode()
shell_output_logger(result_stderr, '[stderr]', logging.WARN)
return (proc.returncode, result_stdout, result_stderr)
def service_bus_message_generator(sb_message: dict, status: str, deployment_message: str, outputs=None):
"""
Generate a resource request message
"""
installation_id = get_installation_id(sb_message)
message_dict = {
"operationId": sb_message["operationId"],
"stepId": sb_message["stepId"],
"id": sb_message["id"],
"status": status,
"message": f"{installation_id}: {deployment_message}"}
if outputs is not None:
message_dict["outputs"] = outputs
resource_request_message = json.dumps(message_dict)
logger.debug(f"Deployment Status Message: {resource_request_message}")
return resource_request_message
async def invoke_porter_action(msg_body: dict, sb_client: ServiceBusClient, config: dict) -> bool:
"""
Handle resource message by invoking specified porter action (i.e. install, uninstall)
"""
installation_id = get_installation_id(msg_body)
action = msg_body["action"]
logger.info(f"{action} action starting for {installation_id}...")
sb_sender = sb_client.get_queue_sender(queue_name=config["deployment_status_queue"])
# post an update message to set the status to an 'in progress' one
resource_request_message = service_bus_message_generator(msg_body, statuses.in_progress_status_string_for[action], "Job starting")
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"], session_id=msg_body["operationId"]))
logger.info(f'Sent status message for {installation_id} - {statuses.in_progress_status_string_for[action]} - Job starting')
# Build and run porter command (flagging if its a built-in action or custom so we can adapt porter command appropriately)
is_custom_action = action not in ["install", "upgrade", "uninstall"]
porter_command = await build_porter_command(config, msg_body, is_custom_action)
logger.debug("Starting to run porter execution command...")
returncode, _, err = await run_porter(porter_command, config)
logger.debug("Finished running porter execution command.")
action_completed_without_error = True
# Handle command output
if returncode != 0 and err is not None:
error_message = "Error message: " + " ".join(err.split('\n')) + "; Command executed: " + " ".join(porter_command)
action_completed_without_error = False
if "uninstall" == action and "could not find installation" in err:
logger.warning("The installation doesn't exist. Treating as a successful action to allow the flow to proceed.")
action_completed_without_error = True
error_message = f"A success despite of underlying error. {error_message}"
if action_completed_without_error:
status_for_sb_message = statuses.pass_status_string_for[action]
else:
status_for_sb_message = statuses.failed_status_string_for[action]
resource_request_message = service_bus_message_generator(msg_body, status_for_sb_message, error_message)
# Post message on sb queue to notify receivers of action failure
logger.info(f"{installation_id}: Porter action failed with error = {error_message}")
else:
# Get the outputs
get_porter_outputs_successful, outputs = await get_porter_outputs(msg_body, config)
if get_porter_outputs_successful:
status_for_sb_message = statuses.pass_status_string_for[action]
status_message = f"{action} action completed successfully."
else:
action_completed_without_error = False
status_for_sb_message = statuses.failed_status_string_for[action]
status_message = f"{action} action completed successfully, but failed to get outputs."
resource_request_message = service_bus_message_generator(msg_body, status_for_sb_message, status_message, outputs)
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"], session_id=msg_body["operationId"]))
logger.info(f"Sent status message for {installation_id}: {status_for_sb_message}")
# return true as want to continue processing the message
return action_completed_without_error
async def get_porter_outputs(msg_body: dict, config: dict):
"""
Get outputs JSON from a Porter command
"""
porter_command = await build_porter_command_for_outputs(msg_body)
logger.debug("Starting to run porter output command...")
returncode, stdout, err = await run_porter(porter_command, config)
logger.debug("Finished running porter output command.")
if returncode != 0:
error_message = "Error context message = " + " ".join(err.split('\n'))
logger.info(f"{get_installation_id(msg_body)}: Failed to get outputs with error = {error_message}")
return False, {}
else:
outputs_json = {}
try:
outputs_json = json.loads(stdout)
# loop props individually to try to deserialise to dict/list, as all TF outputs are strings, but we want the pure value
for i in range(0, len(outputs_json)):
if "{" in outputs_json[i]['value'] or "[" in outputs_json[i]['value']:
outputs_json[i]['value'] = json.loads(outputs_json[i]['value'].replace("\\", ""))
logger.info(f"Got outputs as json: {outputs_json}")
except ValueError:
logger.error(f"Got outputs invalid json: {stdout}")
return True, outputs_json
async def runner(process_number: int, config: dict):
with tracer.start_as_current_span(process_number):
async with default_credentials(config["vmss_msi_id"]) as credential:
service_bus_client = ServiceBusClient(config["service_bus_namespace"], credential)
await receive_message(service_bus_client, config)
async def check_runners(processes: list, httpserver: Process):
logger.info("Starting runners check...")
while True:
await asyncio.sleep(30)
if all(not process.is_alive() for process in processes):
logger.error("All runner processes have failed!")
httpserver.kill()
if __name__ == "__main__":
initialize_logging()
logger.info("Resource processor starting...")
with tracer.start_as_current_span("resource_processor_main"):
config = set_up_config()
logger.info("Verifying Azure CLI and Porter functionality...")
asyncio.run(run_porter(["az account show -o table"], config))
httpserver = Process(target=start_server)
httpserver.start()
logger.info("Started http server")
processes = []
num = config["number_processes_int"]
logger.info(f"Starting {num} processes...")
for i in range(num):
logger.info(f"Starting process {str(i)}")
process = Process(target=lambda: asyncio.run(runner(i, config)))
processes.append(process)
process.start()
logger.info("All processes have been started. Version is: %s", VERSION)
asyncio.run(check_runners(processes, httpserver))
logger.warn("Exiting main...")
|
AzureTRE/resource_processor/vmss_porter/runner.py/0
|
{
"file_path": "AzureTRE/resource_processor/vmss_porter/runner.py",
"repo_id": "AzureTRE",
"token_count": 4960
}
| 112 |
# syntax=docker/dockerfile-upstream:1.4.0
FROM --platform=linux/amd64 debian:bullseye-slim
# PORTER_INIT
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
# Install zip
RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \
apt-get update && \
apt-get install -y zip --no-install-recommends
# PORTER_MIXINS
# Use the BUNDLE_DIR build argument to copy files into the bundle
COPY --link . ${BUNDLE_DIR}/
WORKDIR "${BUNDLE_DIR}/app"
RUN zip -r /cnab/app/LogicApp.zip .
|
AzureTRE/templates/shared_services/airlock_notifier/Dockerfile.tmpl/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/Dockerfile.tmpl",
"repo_id": "AzureTRE",
"token_count": 267
}
| 113 |
resource "azurerm_firewall_policy_rule_collection_group" "core_airlock_notifier" {
name = "rcg-core-airlock-notifier"
firewall_policy_id = data.azurerm_firewall_policy.core.id
priority = 501
network_rule_collection {
name = "nrc-resource-processor-appservice-deployment"
priority = 201
action = "Allow"
rule {
name = "AppService"
protocols = [
"TCP"
]
destination_addresses = [
"AppService"
]
destination_ports = [
"443"
]
source_ip_groups = [data.azurerm_ip_group.resource_processor.id]
}
}
}
|
AzureTRE/templates/shared_services/airlock_notifier/terraform/rules.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/rules.tf",
"repo_id": "AzureTRE",
"token_count": 291
}
| 114 |
output "fqdn" {
value = azurerm_public_ip.appgwpip.fqdn
}
output "application_gateway_name" {
value = azurerm_application_gateway.agw.name
}
output "storage_account_name" {
value = azurerm_storage_account.staticweb.name
}
output "resource_group_name" {
value = azurerm_application_gateway.agw.resource_group_name
}
output "keyvault_name" {
value = data.azurerm_key_vault.key_vault.name
}
|
AzureTRE/templates/shared_services/certs/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/certs/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 158
}
| 115 |
variable "tre_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/shared_services/cyclecloud/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/cyclecloud/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 46
}
| 116 |
#!/bin/bash
# This script exists to support the migration from the firewall into a shared service bundle, that can be deployed from a dev workstation.
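# Example usage (a sketch only - the values below are placeholders, use the ones
# from your own TRE deployment / management environment):
#   export TRE_ID=mytre
#   export SHARED_SERVICE_KEY=shared-service-firewall
#   export TF_VAR_mgmt_resource_group_name=rg-mytre-mgmt
#   export TF_VAR_mgmt_storage_account_name=mytremgmtstorage
#   export TF_VAR_terraform_state_container_name=tfstate
#   ./deploy_from_local.sh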
set -e
PLAN_FILE="tfplan$$"
TS=$(date +"%s")
LOG_FILE="${TS}-tre-${SHARED_SERVICE_KEY}.log"
LOC="$(dirname -- "$(readlink -f "${BASH_SOURCE}")")"
${LOC}/../../devops/scripts/terraform_wrapper.sh \
-g $TF_VAR_mgmt_resource_group_name \
-s $TF_VAR_mgmt_storage_account_name \
-n $TF_VAR_terraform_state_container_name \
-k ${TRE_ID}-${SHARED_SERVICE_KEY} \
-l ${LOG_FILE} \
-c "terraform plan -out ${PLAN_FILE} && \
terraform apply -input=false -auto-approve ${PLAN_FILE} && \
terraform output -json > ../tre_output.json"
|
AzureTRE/templates/shared_services/deploy_from_local.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/deploy_from_local.sh",
"repo_id": "AzureTRE",
"token_count": 279
}
| 117 |
output "gitea_fqdn" {
value = azurerm_linux_web_app.gitea.default_hostname
}
output "address_prefixes" {
value = jsonencode(data.azurerm_subnet.web_app.address_prefixes)
}
output "gitea_allowed_fqdns_list" {
value = jsonencode(local.gitea_allowed_fqdns_list)
}
output "connection_uri" {
value = "https://${azurerm_linux_web_app.gitea.default_hostname}"
}
output "is_exposed_externally" {
value = false
}
|
AzureTRE/templates/shared_services/gitea/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/gitea/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 175
}
| 118 |
{
"name": "docker-hub",
"online": true,
"storage": {
"blobStoreName": "default",
"strictContentTypeValidation": true,
"write_policy": "ALLOW"
},
"proxy": {
"remoteUrl": "https://registry-1.docker.io",
"contentMaxAge": 1440,
"metadataMaxAge": 1440
},
"dockerProxy": {
"indexType": "HUB"
},
"docker": {
"v1Enabled": true,
"forceBasicAuth": false,
"httpsPort": 8083
},
"negativeCache": {
"enabled": true,
"timeToLive": 1440
},
"httpClient": {
"blocked": false,
"autoBlock": false,
"connection": {
"retries": 0,
"userAgentSuffix": "string",
"timeout": 60,
"enableCircularRedirects": false,
"enableCookies": false,
"useTrustStore": false
}
},
"baseType": "docker",
"repoType": "proxy"
}
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/docker_hub_proxy_conf.json/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/docker_hub_proxy_conf.json",
"repo_id": "AzureTRE",
"token_count": 404
}
| 119 |
locals {
core_vnet = "vnet-${var.tre_id}"
core_resource_group_name = "rg-${var.tre_id}"
nexus_allowed_fqdns = "pypi.org,*.pypi.org,files.pythonhosted.org,security.ubuntu.com,archive.ubuntu.com,keyserver.ubuntu.com,repo.anaconda.com,*.docker.com,*.docker.io,conda.anaconda.org,azure.archive.ubuntu.com,packages.microsoft.com,repo.almalinux.org,download-ib01.fedoraproject.org,cran.r-project.org,cloud.r-project.org"
nexus_allowed_fqdns_list = distinct(compact(split(",", replace(local.nexus_allowed_fqdns, " ", ""))))
workspace_vm_allowed_fqdns = "r3.o.lencr.org,x1.c.lencr.org"
workspace_vm_allowed_fqdns_list = distinct(compact(split(",", replace(local.workspace_vm_allowed_fqdns, " ", ""))))
storage_account_name = lower(replace("stg-${var.tre_id}", "-", ""))
tre_shared_service_tags = {
tre_id = var.tre_id
tre_shared_service_id = var.tre_resource_id
}
}
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 448
}
| 120 |
#!/bin/bash
set -euo pipefail
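# This script follows the Terraform "external" data source protocol: it reads a JSON
# object from stdin (the jq '@sh' expression below) and prints a single JSON object to
# stdout (the final jq -n call). Expected input shape (values are placeholders):
#   {"auth_client_id":"...","auth_client_secret":"...","auth_tenant_id":"...","workspace_client_id":"..."}
# AZURE_ENVIRONMENT is expected to be provided by the calling environment.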
eval "$(jq -r '@sh "AUTH_CLIENT_ID=\(.auth_client_id) AUTH_CLIENT_SECRET=\(.auth_client_secret) AUTH_TENANT_ID=\(.auth_tenant_id) WORSKPACE_CLIENT_ID=\(.workspace_client_id)"')"
az cloud set --name "$AZURE_ENVIRONMENT"
az login --allow-no-subscriptions --service-principal --username "$AUTH_CLIENT_ID" --password "$AUTH_CLIENT_SECRET" --tenant "$AUTH_TENANT_ID" > /dev/null
msGraphUri="$(az cloud show --query endpoints.microsoftGraphResourceId --output tsv)/v1.0"
# get the service principal object id
sp=$(az rest --method GET --uri "${msGraphUri}/serviceprincipals?\$filter=appId eq '${WORKSPACE_CLIENT_ID}'" -o json)
spId=$(echo "$sp" | jq -r '.value[0].id')
# filter to the Workspace Researcher Role
workspaceResearcherRoleId=$(echo "$sp" | jq -r '.value[0].appRoles[] | select(.value == "WorkspaceResearcher") | .id')
principals=$(az rest --method GET --uri "${msGraphUri}/serviceprincipals/${spId}/appRoleAssignedTo" -o json | jq -r --arg workspaceResearcherRoleId "${workspaceResearcherRoleId}" '.value[] | select(.appRoleId == $workspaceResearcherRoleId) | .principalId')
jq -n --arg principals "$principals" '{"principals":$principals}'
|
AzureTRE/templates/workspace_services/azureml/terraform/get_app_role_members.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/terraform/get_app_role_members.sh",
"repo_id": "AzureTRE",
"token_count": 459
}
| 121 |
# Using AzApi due to https://github.com/hashicorp/terraform-provider-azurerm/issues/15362
resource "azapi_resource" "compute_instance" {
type = "Microsoft.MachineLearningServices/workspaces/computes@2022-06-01-preview"
name = local.aml_compute_instance_name
location = data.azurerm_resource_group.ws.location
parent_id = data.azurerm_machine_learning_workspace.workspace.id
tags = local.tre_user_resources_tags
schema_validation_enabled = false
body = jsonencode({
properties = {
computeType = "ComputeInstance"
properties = {
vmSize = var.vm_size_sku
computeInstanceAuthorizationType = "personal"
enableNodePublicIp = false
personalComputeInstanceSettings = {
assignedUser = {
objectId = var.user_object_id
tenantId = var.auth_tenant_id
}
}
subnet = {
id = data.azurerm_subnet.aml.id
}
}
}
})
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/compute.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/compute.tf",
"repo_id": "AzureTRE",
"token_count": 546
}
| 122 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/guacamole/template_schema.json",
"type": "object",
"title": "Gitea",
"description": "Gitea is a lightweight DevOps platform. Features include code hosting, code review, CI/CD, project management and package management.",
"required": [
],
"properties": {
"display_name": {
"type": "string",
"title": "Name for the workspace service",
"description": "The name of the workspace service to be displayed to users",
"default": "Gitea",
"updateable": true
},
"description": {
"type": "string",
"title": "Description of the workspace service",
"description": "Description of the workspace service",
"default": "Gitea is a lightweight DevOps platform. Features include code hosting, code review, CI/CD, project management and package management.",
"updateable": true
},
"overview": {
"type": "string",
"title": "Workspace Service Overview",
"description": "Long form description of the workspace service, in markdown syntax",
"default": "Gitea is a lightweight DevOps platform. Features include code hosting, code review, CI/CD, project management and package management. Documentation can be found here: [https://docs.gitea.com/](https://docs.gitea.com/).",
"updateable": true
},
"sql_sku": {
"$id": "#/properties/sql_sku",
"type": "string",
"title": "MySQL server SKU",
"description": "MySQL server SKU",
"updateable": true,
"enum": [
"B | 4GB 2vCores",
"GP | 8GB 2vCores",
"BC | 16GB 2vCores"
],
"default": "B | 4GB 2vCores"
},
"is_exposed_externally": {
"$id": "#/properties/is_exposed_externally",
"type": "boolean",
"title": "Expose externally",
"description": "Is Gitea accessible from outside of the TRE network.",
"default": false
}
},
"uiSchema": {
"is_exposed_externally": {
"classNames": "tre-hidden"
}
},
"pipeline": {
"install": [
{
"stepId": "main"
},
{
"stepId": "12ba0dad-ea6c-4d0d-9255-d316212f5ffa",
"stepTitle": "Add Gitea URI as AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}",
"value": "{{ resource.properties.authentication_callback_uri }}"
}
}
]
},
{
"stepId": "260421b3-7308-491f-b531-e007cbh0ff46",
"stepTitle": "Add network firewall rules for gitea",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_gitea",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "AAD access",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_gitea",
"action": "Allow",
"rules": [
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"upgrade": [
{
"stepId": "main"
},
{
"stepId": "260421b3-7308-491f-b531-e007cdc0ff46",
"stepTitle": "Add network firewall rules for gitea",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_gitea",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "AAD access for authNZ",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_gitea",
"action": "Allow",
"rules": [
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
},
{
"stepId": "741c7ff2-eff5-47b2-bf62-2b410d65c96b",
"stepTitle": "Add Gitea URI as AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}",
"value": "{{ resource.properties.authentication_callback_uri }}"
}
}
]
}
],
"uninstall": [
{
"stepId": "9a1d6b95-26c8-4165-8890-573dd4e2b45c",
"stepTitle": "Update Gitea URI to AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}"
}
}
]
},
{
"stepId": "260421b3-7388-491f-b531-e007cdc0ff46",
"stepTitle": "Add network firewall rules for gitea",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_gitea"
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_gitea"
}
}
]
},
{
"stepId": "main"
}
]
}
}
|
AzureTRE/templates/workspace_services/gitea/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 4645
}
| 123 |
FROM maven:3-jdk-11-slim AS client_build
COPY ./guacamole-auth-azure/pom.xml /pom.xml
# cache dependencies in a separate layer
RUN mvn package -Dmaven.test.skip
COPY ./guacamole-auth-azure/src /src
COPY ./docker/maven_package_and_exit_succesfully.sh /tmp/
RUN bash /tmp/maven_package_and_exit_succesfully.sh
FROM scratch as test-results
COPY --from=client_build /target/surefire-reports/* /
FROM guacamole/guacd:1.5.3
ARG GUACAMOLE_AZURE_VERSION=0.3.4
ENV DEBIAN_FRONTEND=noninteractive
# https://github.com/microsoft/AzureTRE/issues/1937
# hadolint ignore=DL3002
USER root
RUN apk add --update --no-cache wget openssh openjdk11-jre \
&& ssh-keygen -A
ENV CATALINA_BASE=/usr/share/tomcat9/
RUN TOMCAT_ARCHIVE="tomcat.tar.gz" && \
TOMCAT_VER="9.0.83" && \
wget -O "$TOMCAT_ARCHIVE" -N "https://archive.apache.org/dist/tomcat/tomcat-9/v${TOMCAT_VER}/bin/apache-tomcat-${TOMCAT_VER}.tar.gz" --progress=dot:giga && \
tar xzf "$TOMCAT_ARCHIVE" && \
rm -f "$TOMCAT_ARCHIVE" && \
mv "apache-tomcat-${TOMCAT_VER}/" "$CATALINA_BASE"
ENV GUACAMOLE_HOME=/guacamole/
ENV GUACAMOLE_LIB="${GUACAMOLE_HOME}/lib/"
ENV CLASSPATH=${GUACAMOLE_LIB}:${CLASSPATH}
RUN mkdir /guac-transfer
COPY ./docker/guacamole/ ${GUACAMOLE_HOME}
RUN S6_ARCHIVE=s6-overlay.tar.gz && \
wget -O "$S6_ARCHIVE" "https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-amd64.tar.gz" --progress=dot:giga && \
tar xzvf "$S6_ARCHIVE" -C / && \
rm -f "$S6_ARCHIVE"
COPY ./docker/sshd_config /etc/ssh/
COPY ./docker/services /etc/services.d/
# retrieve auth integration from build image
COPY --from=client_build /target/lib/* "${GUACAMOLE_LIB}"
COPY --from=client_build "/target/guacamole-auth-tre-${GUACAMOLE_AZURE_VERSION}.jar" "${GUACAMOLE_HOME}/extensions/"
RUN wget -O "${GUACAMOLE_HOME}/guacamole.war" "http://apache.org/dyn/closer.cgi?action=download&filename=guacamole/1.5.3/binary/guacamole-1.5.3.war" --progress=dot:giga
ENV OAUTH2_PROXY_HOME=/etc/oauth2-proxy
RUN OAUTH2_PROXY_ARCHIVE=oauth2-proxy.tar.gz && \
wget -O "$OAUTH2_PROXY_ARCHIVE" "https://github.com/oauth2-proxy/oauth2-proxy/releases/download/v7.4.0/oauth2-proxy-v7.4.0.linux-amd64.tar.gz" --progress=dot:giga && \
mkdir -p "$OAUTH2_PROXY_HOME" && \
tar zxpf "$OAUTH2_PROXY_ARCHIVE" -C "$OAUTH2_PROXY_HOME" --strip-components=1 && \
rm -f "$OAUTH2_PROXY_ARCHIVE"
COPY ./docker/index.jsp "$CATALINA_BASE"/webapps/ROOT/index.jsp
RUN wget -O "/tmp/applicationinsights-agent.jar" "https://github.com/microsoft/ApplicationInsights-Java/releases/download/3.4.18/applicationinsights-agent-3.4.18.jar" --progress=dot:giga
ENV CATALINA_OPTS="$CATALINA_OPTS -javaagent:/tmp/applicationinsights-agent.jar"
ENTRYPOINT [ "/init" ]
EXPOSE 80 2222
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/Dockerfile/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/Dockerfile",
"repo_id": "AzureTRE",
"token_count": 1253
}
| 124 |
# Guacamole Authorization Extension
This extension is built with Maven and placed inside the Guacamole extensions directory.
Guacamole tries to authorize using all the given extensions.
Read more [here](https://guacamole.apache.org/doc/gug/guacamole-ext.html).
## TRE Authorization extension
This extension works in the following manner:
1. It receives a token from the OpenID extension.
2. The extension calls the project API to get the user's VM list.
3. When a connect request is made, the extension calls the project API to get the password for the selected VM and injects it into the Guacamole configuration.
## OAuth2 Proxy
- The extension uses [OAuth2_Proxy](https://github.com/oauth2-proxy/oauth2-proxy), a reverse proxy and static file server that provides authentication using providers to validate accounts by email, domain or group.
- The current version in use is **7.4.0**.
- The main file that controls the behavior of the oauth2 proxy is the [run](/workspaces/AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/oauth/run) file, which contains all the runtime arguments.
- Some important notes on the way we use the oauth2 proxy:
  - The Guacamole auth extension uses the generic provider (oidc), since the Azure provider is broken in the proxy repository.
  - When upgrading to version 7.4.0, `--insecure-oidc-allow-unverified-email true` and `--oidc-groups-claim "roles"` were added because of the following [issue](https://github.com/oauth2-proxy/oauth2-proxy/issues/1680); a sketch of how these fit into the run arguments is shown below.
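For reference, a minimal sketch of how those options might appear among the oauth2-proxy runtime arguments (the binary path is taken from `OAUTH2_PROXY_HOME` in the Dockerfile; the exact invocation and the remaining flags in the real `run` file will differ):
```bash
# illustrative sketch - the flag names are real oauth2-proxy options, everything else is assumed
exec "${OAUTH2_PROXY_HOME}/oauth2-proxy" \
  --provider=oidc \
  --insecure-oidc-allow-unverified-email=true \
  --oidc-groups-claim="roles"
  # ...client id/secret, OIDC issuer, cookie, redirect and upstream flags omitted
```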
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/readme.md/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/readme.md",
"repo_id": "AzureTRE",
"token_count": 423
}
| 125 |
package org.apache.guacamole.auth.azuretre.connection;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.auth.azuretre.user.AzureTREAuthenticatedUser;
import org.apache.guacamole.net.auth.AuthenticatedUser;
import org.apache.guacamole.net.auth.Connection;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
class ConnectionServiceTest {
@Mock
AuthenticatedUser authenticatedUser;
@Test
public void getConnectionsWhenEmpty() {
final Map<String, Connection> connectionList = Collections.emptyMap();
testGetConnections(connectionList);
}
@Test
public void getConnectionsWhenMany() {
final Map<String, Connection> connectionList = new HashMap<>() {{
put("dummy_connection", null);
}};
testGetConnections(connectionList);
}
private void testGetConnections(final Map<String, Connection> connectionList) {
try (MockedStatic<ConnectionService> connectionServiceMockedStatic = Mockito.mockStatic(
ConnectionService.class)) {
connectionServiceMockedStatic.when(() -> ConnectionService.getConnections(
(AzureTREAuthenticatedUser) authenticatedUser))
.thenReturn(connectionList);
assertEquals(connectionList, ConnectionService.getConnections(
(AzureTREAuthenticatedUser) authenticatedUser));
} catch (final GuacamoleException e) {
e.printStackTrace();
}
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/connection/ConnectionServiceTest.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/connection/ConnectionServiceTest.java",
"repo_id": "AzureTRE",
"token_count": 647
}
| 126 |
# Guacamole User Resources
This folder contains user resources that can be deployed with the Guacamole workspace service:
- linuxvm - a Linux-based virtual machine (expects an Ubuntu 18.04-based VM)
- windowsvm - A Windows-based virtual machine
## Customising the user resources
The `guacamole-azure-linuxvm` and `guacamole-azure-windowsvm` folders follow a consistent layout.
To update one of these templates (or to create a new template based on these folders) to use different image details or VM sizes, there are a few files that need to be updated:
| File | Description |
| ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `porter.yaml` | This file describes the template and the name should be updated when creating a template based on the folder.<br> This file also contains a `custom` data section that describes the VM properties.<br> Additionally, the version needs to be updated to deploy an updated version |
| `template_schema.json` | This file controls the validation applied to the template, for example specifying the valid options for fields such as size and image |
### Configuration
In `porter.yaml`, the `custom` section contains a couple of sub-sections (shown below)
```yaml
custom:
# For information on vm_sizes and image_options, see README.me in the guacamole/user-resources folder
vm_sizes:
"2 CPU | 8GB RAM": Standard_D2s_v5
"4 CPU | 16GB RAM": Standard_D4s_v5
"8 CPU | 32GB RAM": Standard_D8s_v5
"16 CPU | 64GB RAM": Standard_D16s_v5
image_options:
"Ubuntu 18.04":
source_image_reference:
publisher: canonical
offer: ubuntuserver
sku: 18_04-lts-gen2
version: latest
install_ui: true
conda_config: false
"Ubuntu 18.04 Data Science VM":
source_image_reference:
publisher: microsoft-dsvm
offer: ubuntu-1804
sku: 1804-gen2
version: latest
install_ui: false
conda_config: true
# "Custom Image From Gallery":
# source_image_name: your-image
# install_ui: true
# conda_config: true
```
The `vm_sizes` section is a map of a custom SKU description to the SKU identifier.
The `image_options` section defines the possible image choices for the template (note that the name of the image used here needs to be included in the corresponding enum in `template_schema.json`, as sketched below).
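For illustration, the matching enum might look like the following in `template_schema.json` (the `os_image` property name and the image names are assumptions taken from the example above, not copied from the actual linuxvm schema):
```json
{
  "properties": {
    "os_image": {
      "type": "string",
      "title": "Linux image",
      "enum": [
        "Ubuntu 18.04",
        "Ubuntu 18.04 Data Science VM",
        "Custom Image From Gallery"
      ]
    }
  }
}
```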
Within the image definition in `image_options` there are a few properties that can be specified:
| Name | Description |
| ------------------------ | -------------------------------------------------------------------------------------------------------- |
| `source_image_name` | Specify VM image to use by name (see notes below for identifying the image gallery containing the image) |
| `source_image_reference` | Specify VM image to use by `publisher`, `offer`, `sku` & `version` (e.g. for Azure Marketplace images) |
| `install_ui` | (Linux only) Set `true` to install desktop environment |
| `conda_config` | Set true to configure conda |
When specifying images using `source_image_name`, the image must be stored in an [image gallery](https://learn.microsoft.com/en-us/azure/virtual-machines/azure-compute-gallery).
To enable re-using built user resource templates across environments where the image may vary, the image gallery is configured via the `RP_BUNDLE_VALUES` environment variable when deploying the TRE.
The `RP_BUNDLE_VALUES` variable is a JSON object, and the `image_gallery_id` property within it identifies the image gallery that contains the images specified by `source_image_name`:
```bash
RP_BUNDLE_VALUES='{"image_gallery_id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/<your-rg>/providers/Microsoft.Compute/galleries/<your-gallery-name>"}
```
|
AzureTRE/templates/workspace_services/guacamole/user_resources/README.md/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/README.md",
"repo_id": "AzureTRE",
"token_count": 1822
}
| 127 |
output "ip" {
value = azurerm_network_interface.internal.private_ip_address
}
output "hostname" {
value = azurerm_windows_virtual_machine.windowsvm.name
}
output "azure_resource_id" {
value = azurerm_windows_virtual_machine.windowsvm.id
}
output "connection_uri" {
value = "https://${data.azurerm_linux_web_app.guacamole.default_hostname}/?/client/${textencodebase64("${azurerm_windows_virtual_machine.windowsvm.name}\u0000c\u0000azuretre", "UTF-8")}"
}
output "vm_username" {
value = random_string.username.result
}
output "vm_password_secret_name" {
value = local.vm_password_secret_name
}
output "keyvault_name" {
value = local.keyvault_name
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 245
}
| 128 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/template_schema.json",
"type": "object",
"title": "Windows Virtual Machine",
"description": "Windows virtual machine.",
"required": [
],
"authorizedRoles": [
"WorkspaceOwner", "WorkspaceResearcher"
],
"properties": {
"os_image": {
"$id": "#/properties/os_image",
"type": "string",
"title": "Windows image",
"description": "Select Windows image to use for VM",
"enum": [
"Windows 10",
"Windows 11",
"Server 2019 Data Science VM"
]
},
"vm_size": {
"$id": "#/properties/vm_size",
"type": "string",
"title": "VM Size",
"description": "Select size of VM",
"enum": [
"2 CPU | 8GB RAM",
"4 CPU | 16GB RAM",
"8 CPU | 32GB RAM",
"16 CPU | 64GB RAM"
],
"updateable": true
},
"shared_storage_access": {
"$id": "#/properties/shared_storage_access",
"type": "boolean",
"title": "Shared storage",
"default": true,
"description": "Enable access to shared storage"
}
}
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 624
}
| 129 |
data "azurerm_resource_group" "ws" {
name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_virtual_network" "ws" {
name = "vnet-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_key_vault" "ws" {
name = local.keyvault_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_key_vault_secret" "aad_tenant_id" {
name = "auth-tenant-id"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_subnet" "services" {
name = "ServicesSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_private_dns_zone" "health" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurehealthcareapis.com"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "dicom" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.dicom.azurehealthcareapis.com"]
resource_group_name = local.core_resource_group_name
}
|
AzureTRE/templates/workspace_services/health-services/terraform/data.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/health-services/terraform/data.tf",
"repo_id": "AzureTRE",
"token_count": 539
}
| 130 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/innereye/template_schema.json",
"type": "object",
"title": "InnerEye service template",
"description": "Installs Azure ML and InnerEye components. Please be aware this template opens up additional firewall rules to enable Azure ML to function.",
"required": [],
"properties": {
"inference_sp_client_id": {
"type": "string",
"title": "Service Principal Client ID for AML Access",
"description": "The client id of the service principal used to access the Azure ML workspace",
"default": ""
},
"inference_sp_client_secret": {
"type": "string",
"title": "Service Principal Client Secret for AML Access",
"description": "The client secret of the service principal used to access the Azure ML workspace",
"default": ""
}
}
}
|
AzureTRE/templates/workspace_services/innereye/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 385
}
| 131 |
mlflow server \
--backend-store-uri "$MLFLOW_SERVER_FILE_STORE" \
--default-artifact-root "$MLFLOW_SERVER_DEFAULT_ARTIFACT_ROOT" \
--host "$MLFLOW_SERVER_HOST" \
--port "$MLFLOW_SERVER_PORT" \
--workers "$MLFLOW_SERVER_WORKERS"
|
AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/startup.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/startup.sh",
"repo_id": "AzureTRE",
"token_count": 113
}
| 132 |
variable "workspace_id" {
type = string
}
variable "tre_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "mgmt_acr_name" {
type = string
}
variable "mgmt_resource_group_name" {
type = string
}
variable "is_exposed_externally" {
type = bool
description = "Is the webapp available on the public internet"
default = false
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/workspace_services/mlflow/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 154
}
| 133 |
ID="__CHANGE_ME__"
WORKSPACE_ID="__CHANGE_ME__"
TRE_ID="__CHANGE_ME__"
MGMT_RESOURCE_GROUP_NAME="__CHANGE_ME__"
MGMT_ACR_NAME="__CHANGE_ME__"
|
AzureTRE/templates/workspace_services/ohdsi/.env.sample/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/.env.sample",
"repo_id": "AzureTRE",
"token_count": 71
}
| 134 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/ohdsi/template_schema.json",
"type": "object",
"title": "OHDSI Workspace Service",
"description": "Provides OHDSI within the workspace",
"required": [],
"properties": {
"display_name": {
"type": "string",
"title": "Name for the workspace service",
"description": "The name of the workspace service to be displayed to users",
"default": "OHDSI ATLAS",
"updateable": true
},
"description": {
"type": "string",
"title": "Description of the workspace service",
"description": "Description of the workspace service",
"default": "OHDSI empowers researchers and collaborators to bring out the value of health data through large-scale analytics and open-source solutions.",
"updateable": true
},
"overview": {
"type": "string",
"title": "Workspace Service Overview",
"description": "Long form description of the workspace service, in markdown syntax",
"default": "OHDSI (Observational Health Data Sciences and Informatics) is a community-driven open-source project that provides tools and resources to help researchers and clinicians make better decisions about healthcare. OHDSI's Common Data Model (CDM) is a standardized format for storing and analyzing observational health data. The CDM can be used to create a variety of machine learning models, including patient-level prediction models, population-level effect estimation models, and risk prediction models.",
"updateable": true
},
"address_space": {
"$id": "#/properties/address_space",
"type": "string",
"title": "Address space",
"description": "Address space for PostgreSQL's subnet"
},
"configure_data_source": {
"type": "boolean",
"title": "Configure Data Source",
"default": true,
"updateable": true
}
},
"allOf": [
{
"if": {
"properties": {
"configure_data_source": {
"const": true
}
},
"required": [
"configure_data_source"
]
},
"then": {
"properties": {
"data_source_config": {
"type": "object",
"title": "Data Source Configuration",
"default": null,
"properties": {
"dialect": {
"type": "string",
"title": "Dialect",
"default": "Azure Synapse",
"enum": [
"Azure Synapse"
],
"updateable": true
},
"source_name": {
"type": "string",
"title": "Source Name"
},
"source_key": {
"type": "string",
"title": "Source Key",
"description": "A unique source key"
},
"connection_string": {
"type": "string",
"title": "Connection String"
},
"username": {
"type": "string",
"title": "Username"
},
"password": {
"type": "string",
"title": "Password",
"format": "password"
}
},
"required": [
"source_name",
"dialect",
"source_key",
"connection_string",
"username",
"password"
]
},
"data_source_daimons": {
"type": "object",
"title": "Source Daimons",
"description": "Configure source daimons",
"default": null,
"properties": {
"daimon_cdm": {
"type": "string",
"title": "CDM"
},
"daimon_vocabulary": {
"type": "string",
"title": "Vocabulary"
},
"daimon_results": {
"type": "string",
"title": "Results Schema to copy"
},
"daimon_cem": {
"type": "string",
"title": "CEM"
},
"daimon_cem_results": {
"type": "string",
"title": "CEMResults"
},
"daimon_temp": {
"type": "string",
"title": "Temp Schema to copy"
}
},
"required": [
"daimon_cdm",
"daimon_vocabulary",
"daimon_results"
]
}
}
}
}
],
"uiSchema": {
"address_space": {
"classNames": "tre-hidden"
}
},
"pipeline": {
"install": [
{
"stepId": "b9d7370e-c624-4263-921c-632329974872",
"stepTitle": "Upgrade workspace to ensure the existence of Postgres' address space",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": []
},
{
"stepId": "main"
},
{
"stepId": "7276dcc1-7d0e-496a-badf-87c8c25fc06e",
"stepTitle": "Add Atlas callback URI as AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}",
"value": "{{ resource.properties.authentication_callback_uri }}"
}
}
]
},
{
"stepId": "37d7fbde-fd61-4096-ac4d-741960474995",
"stepTitle": "Add firewall rules for AAD",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "Allow access to AAD",
"source_addresses": "{{ resource.parent.properties.address_spaces }}",
"destination_addresses": [
"AzureActiveDirectory"
],
"destination_ports": [
"*"
],
"protocols": [
"TCP"
]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}",
"action": "Allow",
"rules": [
{
"name": "microsoft-aad",
"description": "Allow AAD FQDNs",
"source_addresses": "{{ resource.parent.properties.address_spaces }}",
"target_fqdns": [
"*.msftauth.net",
"*.msauth.net",
"login.microsoftonline.com",
"aadcdn.msftauthimages.net",
"aadcdn.msauthimages.net",
"*.login.live.com",
"*.microsoftonline-p.com",
"msft.sts.microsoft.com"
],
"protocols": [
{
"port": "80",
"type": "Http"
},
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"upgrade": [
{
"stepId": "01dfec9b-ecc5-42c4-a022-050930a29916",
"stepTitle": "Upgrade workspace to ensure the existence of Postgres' address space",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": []
},
{
"stepId": "main"
},
{
"stepId": "0d3961fd-1538-4b0e-a6ed-bf401b65c034",
"stepTitle": "Upgrade Atlas callback URI as AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}",
"value": "{{ resource.properties.authentication_callback_uri }}"
}
}
]
},
{
"stepId": "3329d760-3e09-4721-8722-f369b123ca77",
"stepTitle": "Add firewall rules for AAD",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "Allow access to AAD",
"source_addresses": "{{ resource.parent.properties.address_spaces }}",
"destination_addresses": [
"AzureActiveDirectory"
],
"destination_ports": [
"*"
],
"protocols": [
"TCP"
]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}",
"action": "Allow",
"rules": [
{
"name": "microsoft-aad",
"description": "Allow AAD FQDNs",
"source_addresses": "{{ resource.parent.properties.address_spaces }}",
"target_fqdns": [
"*.msftauth.net",
"*.msauth.net",
"login.microsoftonline.com",
"aadcdn.msftauthimages.net",
"aadcdn.msauthimages.net",
"*.login.live.com",
"*.microsoftonline-p.com",
"msft.sts.microsoft.com"
],
"protocols": [
{
"port": "80",
"type": "Http"
},
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"uninstall": [
{
"stepId": "e1986fe8-b1f9-4a9d-abb1-da1ea9a50b41",
"stepTitle": "Remove Atlas callback URI as AAD redirect URI",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": [
{
"name": "aad_redirect_uris",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "{{ resource.id }}"
}
}
]
},
{
"stepId": "main"
}
]
}
}
|
AzureTRE/templates/workspace_services/ohdsi/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 6895
}
| 135 |
output "app_role_workspace_owner_id" {
value = random_uuid.app_role_workspace_owner_id.result
}
output "app_role_workspace_researcher_id" {
value = random_uuid.app_role_workspace_researcher_id.result
}
output "app_role_workspace_airlock_manager_id" {
value = random_uuid.app_role_workspace_airlock_manager_id.result
}
output "client_id" {
value = azuread_application.workspace.application_id
}
output "scope_id" {
value = "api://${var.workspace_resource_name_suffix}"
}
output "sp_id" {
value = azuread_service_principal.workspace.object_id
}
|
AzureTRE/templates/workspaces/base/terraform/aad/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/aad/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 217
}
| 136 |
#!/bin/bash
set -e
# This script is not used and is left here for you to debug the creation of the workspace
# at a Terraform level without having to interact with Porter
# This script assumes you have created an .env from the sample and the variables
# will come from there.
# shellcheck disable=SC2154
terraform init -reconfigure -input=false -backend=true \
-backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name}" \
-backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name}" \
-backend-config="container_name=${TF_VAR_terraform_state_container_name}" \
-backend-config="key=${TF_VAR_tre_id}-ws-${TF_VAR_tre_resource_id}"
terraform apply -auto-approve
|
AzureTRE/templates/workspaces/base/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 239
}
| 137 |
variable "tre_id" {
type = string
description = "Unique TRE ID"
}
variable "tre_resource_id" {
type = string
description = "Resource ID"
}
variable "shared_storage_quota" {
type = number
default = 50
description = "Quota (in GB) to set for the VM Shared Storage."
}
variable "location" {
type = string
description = "Azure location (region) for deployment of core TRE services"
}
variable "address_spaces" {
type = string
description = "VNet address space (base 64)"
}
variable "deploy_app_service_plan" {
type = bool
default = true
description = "Deploy app service plan"
}
variable "app_service_plan_sku" {
type = string
description = "App Service Plan SKU"
}
variable "enable_local_debugging" {
type = bool
default = false
description = "This will allow storage account access over the internet. Set to true to allow deploying this from a local machine."
}
variable "register_aad_application" {
type = bool
default = false
description = "Create an AAD application automatically for the Workspace."
}
variable "create_aad_groups" {
type = bool
default = false
description = "Create AAD groups automatically for the Workspace Application Roles."
}
variable "enable_airlock" {
type = bool
description = "Controls the deployment of Airlock resources in the workspace."
}
variable "aad_redirect_uris_b64" {
type = string # B64 encoded list of objects like [{"name": "my uri 1", "value": "https://..."}, {}]
default = "W10=" #b64 for []
}
variable "auth_tenant_id" {
type = string
description = "Used to authenticate into the AAD Tenant to create the AAD App"
}
variable "auth_client_id" {
type = string
description = "Used to authenticate into the AAD Tenant to create the AAD App"
}
variable "auth_client_secret" {
type = string
description = "Used to authenticate into the AAD Tenant to create the AAD App"
}
# These variables are only passed in if you are not registering an AAD
# application as they need passing back out
variable "app_role_id_workspace_owner" {
type = string
default = ""
description = "The id of the application role WorkspaceOwner in the identity provider, this is passed in so that we may return it as an output."
}
variable "app_role_id_workspace_researcher" {
type = string
default = ""
description = "The id of the application role WorkspaceResearcher in the identity provider, this is passed in so that we may return it as an output."
}
variable "app_role_id_workspace_airlock_manager" {
type = string
default = ""
description = "The id of the application role AirlockManager in the identity provider, this is passed in so that we may return it as an output."
}
variable "client_id" {
type = string
default = ""
description = "The client id of the workspace in the identity provider, this is passed in so that we may return it as an output."
}
variable "client_secret" {
type = string
default = ""
description = "The client secret of the workspace in the identity provider, this is passed in so that we may return it as an output."
}
variable "sp_id" {
type = string
default = ""
description = "The Service Principal in the Identity provider to be able to get claims, this is passed in so that we may return it as an output."
}
variable "scope_id" {
type = string
default = ""
description = "The Service Principal Name or Identifier URI, this is passed in so that we may return it as an output."
}
variable "workspace_owner_object_id" {
type = string
default = ""
description = "The Object Id of the user that you wish to be the Workspace Owner. E.g. the TEST_AUTOMATION_ACCOUNT."
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/workspaces/base/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 1273
}
| 138 |
import { Dialog, DialogFooter, PrimaryButton, DialogType, Spinner, Dropdown, MessageBar, MessageBarType, Icon } from '@fluentui/react';
import React, { useContext, useState } from 'react';
import { AvailableUpgrade, Resource } from '../../models/resource';
import { HttpMethod, ResultType, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { ResourceType } from '../../models/resourceType';
import { APIError } from '../../models/exceptions';
import { LoadingState } from '../../models/loadingState';
import { ExceptionLayout } from './ExceptionLayout';
import { useAppDispatch } from '../../hooks/customReduxHooks';
import { addUpdateOperation } from '../shared/notifications/operationsSlice';
interface ConfirmUpgradeProps {
resource: Resource,
onDismiss: () => void
}
export const ConfirmUpgradeResource: React.FunctionComponent<ConfirmUpgradeProps> = (props: ConfirmUpgradeProps) => {
const apiCall = useAuthApiCall();
const [selectedVersion, setSelectedVersion] = useState("")
const [apiError, setApiError] = useState({} as APIError);
const [requestLoadingState, setRequestLoadingState] = useState(LoadingState.Ok);
const workspaceCtx = useContext(WorkspaceContext);
const dispatch = useAppDispatch();
const upgradeProps = {
type: DialogType.normal,
title: `Upgrade Template Version?`,
closeButtonAriaLabel: 'Close',
    subText: `Are you sure you want to upgrade the template version of ${props.resource.properties.display_name} from version ${props.resource.templateVersion}?`,
};
const dialogStyles = { main: { maxWidth: 450 } };
const modalProps = {
titleAriaId: 'labelId',
subtitleAriaId: 'subTextId',
isBlocking: true,
styles: dialogStyles
};
const wsAuth = (props.resource.resourceType === ResourceType.WorkspaceService || props.resource.resourceType === ResourceType.UserResource);
const upgradeCall = async () => {
setRequestLoadingState(LoadingState.Loading);
try {
let body = { templateVersion: selectedVersion }
let op = await apiCall(props.resource.resourcePath,
HttpMethod.Patch,
wsAuth ? workspaceCtx.workspaceApplicationIdURI : undefined,
body,
ResultType.JSON,
undefined,
undefined,
props.resource._etag);
dispatch(addUpdateOperation(op.operation));
props.onDismiss();
} catch (err: any) {
err.userMessage = 'Failed to upgrade resource';
setApiError(err);
setRequestLoadingState(LoadingState.Error);
}
}
const onRenderOption = (option: any): JSX.Element => {
return (
<div>
{option.data && option.data.icon && (
<Icon style={{ marginRight: '8px' }} iconName={option.data.icon} aria-hidden="true" title={option.data.icon} />
)}
<span>{option.text}</span>
</div>
);
};
const convertToDropDownOptions = (upgrade: Array<AvailableUpgrade>) => {
return upgrade.map(upgrade => ({ "key": upgrade.version, "text": upgrade.version, data: { icon: upgrade.forceUpdateRequired ? 'Warning' : '' } }))
}
const getDropdownOptions = () => {
const options = []
const nonMajorUpgrades = props.resource.availableUpgrades.filter(upgrade => !upgrade.forceUpdateRequired)
options.push(...convertToDropDownOptions(nonMajorUpgrades))
return options;
}
return (<>
<Dialog
hidden={false}
onDismiss={() => props.onDismiss()}
dialogContentProps={upgradeProps}
modalProps={modalProps}
>
{
requestLoadingState === LoadingState.Ok &&
<>
<MessageBar messageBarType={MessageBarType.warning} >Upgrading the template version is irreversible.</MessageBar>
<DialogFooter>
<Dropdown
placeholder='Select Version'
options={getDropdownOptions()}
onRenderOption={onRenderOption}
styles={{ dropdown: { width: 125 } }}
onChange={(event, option) => { option && setSelectedVersion(option.text); }}
selectedKey={selectedVersion}
/>
<PrimaryButton primaryDisabled={!selectedVersion} text="Upgrade" onClick={() => upgradeCall()} />
</DialogFooter>
</>
}
{
requestLoadingState === LoadingState.Loading &&
<Spinner label="Sending request..." ariaLive="assertive" labelPosition="right" />
}
{
requestLoadingState === LoadingState.Error &&
<ExceptionLayout e={apiError} />
}
</Dialog>
</>);
};
|
AzureTRE/ui/app/src/components/shared/ConfirmUpgradeResource.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ConfirmUpgradeResource.tsx",
"repo_id": "AzureTRE",
"token_count": 1658
}
| 139 |
import { IStackStyles, Spinner, SpinnerSize, Stack } from "@fluentui/react";
import React, { useEffect, useContext, useState } from 'react';
import { useParams } from 'react-router-dom';
import { HttpMethod, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { Operation } from '../../models/operation';
import { Resource } from '../../models/resource';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { ResourceOperationListItem } from './ResourceOperationListItem';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import config from '../../config.json';
import moment from "moment";
import { APIError } from "../../models/exceptions";
import { LoadingState } from "../../models/loadingState";
import { ExceptionLayout } from "./ExceptionLayout";
import { ResourceOperationStepsList } from "./ResourceOperationStepsList";
interface ResourceOperationsListProps {
resource: Resource
}
export const ResourceOperationsList: React.FunctionComponent<ResourceOperationsListProps> = (props: ResourceOperationsListProps) => {
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
const workspaceCtx = useContext(WorkspaceContext);
const { resourceId } = useParams();
const [resourceOperations, setResourceOperations] = useState([] as Array<Operation>)
const [loadingState, setLoadingState] = useState('loading');
useEffect(() => {
const getOperations = async () => {
try {
// get resource operations
const scopeId = workspaceCtx.roles && workspaceCtx.roles.length > 0 ? workspaceCtx.workspaceApplicationIdURI : "";
const ops = await apiCall(`${props.resource.resourcePath}/${ApiEndpoint.Operations}`, HttpMethod.Get, scopeId);
config.debug && console.log(`Got resource operations, for resource:${props.resource.id}: ${ops.operations}`);
setResourceOperations(ops.operations.reverse());
setLoadingState(ops && ops.operations.length > 0 ? LoadingState.Ok : LoadingState.Error);
} catch (err: any) {
err.userMessage = "Error retrieving resource operations"
setApiError(err);
setLoadingState(LoadingState.Error);
}
};
getOperations();
}, [apiCall, props.resource, resourceId, workspaceCtx.roles, workspaceCtx.workspaceApplicationIdURI]);
const stackStyles: IStackStyles = {
root: {
padding: 0,
minWidth: 300
}
};
switch (loadingState) {
case LoadingState.Ok:
return (
<>
{
resourceOperations && resourceOperations.map((op: Operation, i: number) => {
return (
<Stack wrap horizontal style={{ borderBottom: '1px #999 solid', padding: '10px 0' }} key={i}>
<Stack grow styles={stackStyles}>
<ResourceOperationListItem header={'Resource Id'} val={op.resourceId} />
<ResourceOperationListItem header={'Resource Path'} val={op.resourcePath} />
<ResourceOperationListItem header={'Resource Version'} val={op.resourceVersion.toString()} />
<ResourceOperationListItem header={'Status'} val={op.status} />
<ResourceOperationListItem header={'Action'} val={op.action} />
<ResourceOperationListItem header={'Message'} val={op.message} />
<ResourceOperationListItem header={'Created'} val={`${moment.unix(op.createdWhen).toLocaleString()} (${moment.unix(op.createdWhen).fromNow()})`} />
<ResourceOperationListItem header={'Updated'} val={`${moment.unix(op.updatedWhen).toLocaleString()} (${moment.unix(op.updatedWhen).fromNow()})`} />
<ResourceOperationListItem header={'User'} val={op.user.name} />
<ResourceOperationStepsList header={'Steps'} val={op.steps} />
</Stack>
</Stack>
)
})
}
</>
);
case LoadingState.Error:
return (
<ExceptionLayout e={apiError} />
)
default:
return (
<div style={{ marginTop: '20px' }}>
<Spinner label="Loading operations" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
)
}
};
|
AzureTRE/ui/app/src/components/shared/ResourceOperationsList.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceOperationsList.tsx",
"repo_id": "AzureTRE",
"token_count": 1646
}
| 140 |
import React, { useEffect, useState } from 'react';
import { Icon, ProgressIndicator, Link as FluentLink, Stack, DefaultPalette, Shimmer, ShimmerElementType } from '@fluentui/react';
import { TRENotification } from '../../../models/treNotification';
import { awaitingStates, completedStates, failedStates, inProgressStates, Operation, OperationStep } from '../../../models/operation';
import { Link } from 'react-router-dom';
import moment from 'moment';
import { useInterval } from './useInterval';
import { HttpMethod, useAuthApiCall } from '../../../hooks/useAuthApiCall';
import { ApiEndpoint } from '../../../models/apiEndpoints';
import { getResourceFromResult, Resource } from '../../../models/resource';
import { NotificationPoller } from './NotificationPoller';
import { APIError } from '../../../models/exceptions';
import { ExceptionLayout } from '../ExceptionLayout';
import { addUpdateOperation } from './operationsSlice';
import { useAppDispatch } from '../../../hooks/customReduxHooks';
interface NotificationItemProps {
operation: Operation,
showCallout: (o: Operation, r: Resource) => void;
}
export const NotificationItem: React.FunctionComponent<NotificationItemProps> = (props: NotificationItemProps) => {
const [now, setNow] = useState(moment.utc());
const [isExpanded, setIsExpanded] = useState(false);
const [notification, setNotification] = useState({} as TRENotification);
const [loadingNotification, setLoadingNotification] = useState(true);
const [errorNotification, setErrorNotification] = useState(false);
const dispatch = useAppDispatch();
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
const getRelativeTime = (createdWhen: number) => {
return (moment.utc(moment.unix(createdWhen))).from(now);
};
useEffect(() => {
const setupNotification = async (op: Operation) => {
// ignore if we've already set this operation up
if (notification.resource) return;
let ws = null;
let resource = null;
try {
// is this a workspace, or workspace child resource operation?
if (op.resourcePath.indexOf(ApiEndpoint.Workspaces) !== -1) {
const wsId = op.resourcePath.split('/')[2];
let scopeId = (await apiCall(`${ApiEndpoint.Workspaces}/${wsId}/scopeid`, HttpMethod.Get)).workspaceAuth.scopeId;
ws = (await apiCall(`${ApiEndpoint.Workspaces}/${wsId}`, HttpMethod.Get, scopeId)).workspace;
// is a workspace child resource operation
if (op.resourcePath.split('/').length >= 3) {
let r = await apiCall(op.resourcePath, HttpMethod.Get, scopeId);
resource = getResourceFromResult(r);
// is a workspace operation
} else {
resource = ws;
}
} else {
let r = await apiCall(op.resourcePath, HttpMethod.Get);
resource = getResourceFromResult(r);
}
setNotification({ operation: op, resource: resource, workspace: ws });
} catch (err: any) {
err.userMessage = `Error retrieving operation details for ${props.operation.id}`;
setApiError(err);
setErrorNotification(true);
}
setLoadingNotification(false);
};
setupNotification(props.operation);
}, [props.operation, apiCall, notification.resource]);
// update the 'now' time for comparison
useInterval(() => {
setNow(moment.utc());
}, 10000);
const getIconAndColourForStatus = (status: string) => {
if (failedStates.includes(status)) return ['ErrorBadge', 'red'];
if (completedStates.includes(status)) return ['SkypeCheck', 'green'];
if (awaitingStates.includes(status)) return ['Clock', '#cccccc'];
return ['ProgressLoopInner', DefaultPalette.themePrimary];
};
const updateOperation = (operation: Operation) => {
dispatch(addUpdateOperation(operation));
if (completedStates.includes(operation.status)) {
props.showCallout(operation, notification.resource);
}
};
return (
<>
{
props.operation.dismiss ? <></> :
loadingNotification ?
<li>
<Shimmer shimmerElements={[{ type: ShimmerElementType.gap, width: '100%' }]} />
<Shimmer width="50%" />
<Shimmer shimmerElements={[{ type: ShimmerElementType.gap, width: '100%' }]} />
<Shimmer />
<Shimmer shimmerElements={[{ type: ShimmerElementType.gap, width: '100%' }]} />
<Shimmer />
</li>
:
errorNotification ?
<li>
<ExceptionLayout e={apiError} />
</li>
:
<li className="tre-notification-item">
{
inProgressStates.indexOf(props.operation.status) !== -1 &&
<NotificationPoller notification={notification} updateOperation={(operation: Operation) => updateOperation(operation)} />
}
<ProgressIndicator
barHeight={4}
percentComplete={awaitingStates.includes(props.operation.status) ? 0 : completedStates.includes(props.operation.status) ? 100 : undefined}
label={<Link style={{ textDecoration: 'none', fontWeight: 'bold', color: DefaultPalette.themePrimary }} to={props.operation.resourcePath}>
<Icon iconName={getIconAndColourForStatus(props.operation.status)[0]} style={{ color: getIconAndColourForStatus(props.operation.status)[1], position: 'relative', top: '2px', marginRight: '10px' }} />
{notification.resource.properties.display_name}: {props.operation.action}
</Link>}
description={`${notification.resource.resourceType} is ${props.operation.status}`} />
<Stack horizontal style={{ marginTop: '10px' }}>
<Stack.Item grow={5}>
{
props.operation.steps && props.operation.steps.length > 0 && !(props.operation.steps.length === 1 && props.operation.steps[0].templateStepId === 'main') ?
                      <FluentLink title={isExpanded ? 'Show less' : 'Show more'} href="#" onClick={() => { setIsExpanded(!isExpanded); }} style={{ position: 'relative', top: '2px' }}>{isExpanded ? <Icon iconName='ChevronUp' aria-label='Collapse Steps' /> : <Icon iconName='ChevronDown' aria-label='Expand Steps' />}</FluentLink>
:
' '
}
</Stack.Item>
<Stack.Item> <div className="tre-notification-time">{getRelativeTime(props.operation.createdWhen)}</div></Stack.Item>
</Stack>
{
isExpanded &&
<>
<ul className="tre-notifications-steps-list">
{props.operation.steps && props.operation.steps.map((s: OperationStep, i: number) => {
return (
<li key={i}>
<Icon iconName={getIconAndColourForStatus(s.status)[0]} style={{ color: getIconAndColourForStatus(s.status)[1], position: 'relative', top: '2px', marginRight: '10px' }} />
{
s.templateStepId === "main" ?
<>{notification.resource.properties.display_name}: {props.operation.action}</> :
s.stepTitle
}
</li>);
})
}
</ul>
</>
}
</li>
}
</>);
};
|
AzureTRE/ui/app/src/components/shared/notifications/NotificationItem.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/notifications/NotificationItem.tsx",
"repo_id": "AzureTRE",
"token_count": 3328
}
| 141 |
import { createContext } from "react";
import { CostResource } from "../models/costs";
import { LoadingState } from "../models/loadingState";
export const CostsContext = createContext({
costs: [] as Array<CostResource>,
loadingState: {} as LoadingState,
setCosts: (costs: Array<CostResource>) => { },
setLoadingState: (loadingState: LoadingState) => {},
});
|
AzureTRE/ui/app/src/contexts/CostsContext.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/contexts/CostsContext.ts",
"repo_id": "AzureTRE",
"token_count": 105
}
| 142 |
export enum RoleName {
TREAdmin = "TREAdmin",
TREUser = "TREUser"
}
export enum WorkspaceRoleName {
WorkspaceOwner = "WorkspaceOwner",
WorkspaceResearcher = "WorkspaceResearcher",
AirlockManager = "AirlockManager"
}
|
AzureTRE/ui/app/src/models/roleNames.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/roleNames.ts",
"repo_id": "AzureTRE",
"token_count": 85
}
| 143 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import json
data_dir=sys.argv[1]
def sort_triples(triples, text):
sorted_triples = sorted(triples, key=lambda x:text.find(x['drug']))
return sorted_triples
def build_target_seq_relis(triples):
answer = ""
for z in triples:
drug = z["drug"].lower()
target = z["target"].lower()
rel = z["interaction"].lower()
answer += f"the interaction between {drug} and {target} is {rel}; "
return answer[:-2] + "."
def build_target_seq_2type(triples):
answer = ""
for z in triples:
drug = z["drug"].lower()
target = z["target"].lower()
rel = z["interaction"].lower()
answer += f"{drug} and {target} are {rel}; "
return answer[:-2] + "."
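# Illustrative sketch (not part of the original pipeline): for a hypothetical triple
# {"drug": "aspirin", "target": "cyp2c9", "interaction": "inhibitor"}, the two builders
# above linearize it as:
#   build_target_seq_relis -> "the interaction between aspirin and cyp2c9 is inhibitor."
#   build_target_seq_2type -> "aspirin and cyp2c9 are inhibitor."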
def loader(fname, fn):
ret = []
null_cnt = 0
suc_cnt = 0
null_flag = False
with open(fname, "r", encoding="utf8") as fr:
data = json.load(fr)
for pmid, v in data.items():
content = v["abstract"].strip().replace('\n',' ')
content = content.lower()
if v["triples"] is None or len(v["triples"]) == 0:
if not null_flag:
print(f"Following PMID in {fname} has no extracted triples:")
null_flag = True
print(f"{pmid} ", end="")
null_cnt += 1
else:
triples = v['triples']
triples = sort_triples(triples, content)
answer = fn(triples)
ret.append((pmid, content, answer))
suc_cnt += 1
if null_flag:
print("")
print(f"{len(data)} samples in {fname} has been processed with {null_cnt} samples has no triples extracted.")
return ret
def dumper(content_list, prefix):
fw_pmid = open(prefix + ".pmid", "w")
fw_content = open(prefix + ".x", "w")
fw_label = open(prefix + ".y", "w")
for pmid, x, y in content_list:
print(pmid, file=fw_pmid)
print(x, file=fw_content)
print(y, file=fw_label)
fw_pmid.close()
fw_content.close()
fw_label.close()
def worker(fname, prefix, fn):
ret = loader(fname, fn)
dumper(ret, prefix)
for split in ['train', 'valid', 'test']:
worker(os.path.join(f"{data_dir}", f"{split}.json"), os.path.join(f"{data_dir}", f"relis_{split}"), build_target_seq_relis)
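# For each split this writes relis_<split>.pmid, relis_<split>.x and relis_<split>.y into
# data_dir, holding the PMIDs, the lowercased abstracts and the linearized relation strings.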
|
BioGPT/examples/RE-DDI/rebuild_data.py/0
|
{
"file_path": "BioGPT/examples/RE-DDI/rebuild_data.py",
"repo_id": "BioGPT",
"token_count": 1111
}
| 144 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
def collate(samples, pad_idx, eos_idx, prefix=False, sep_idx=None, prompt=None):
if len(samples) == 0:
return {}
def make_sentence(prompt, source, target):
if source[-1] == eos_idx:
source = source[:-1]
if prompt is None:
return torch.cat([source, target], dim=0)
if prefix:
sep = torch.LongTensor([sep_idx])
return torch.cat([prompt, source, sep, target], dim=0)
return torch.cat([source, prompt, target], dim=0)
def merge(tokens, move_eos_to_beginning=False):
return data_utils.collate_tokens(
tokens,
pad_idx,
eos_idx,
move_eos_to_beginning=move_eos_to_beginning,
)
id = torch.LongTensor([s["id"] for s in samples])
#src_tokens = merge([s["source"] for s in samples])
#src_lengths = torch.LongTensor([s["source"].ne(pad_idx).long().sum() for s in samples])
target_tokens = []
target_lengths = []
for s in samples:
target_tokens.append(make_sentence(prompt, s["source"], s["target"]))
target_lengths = [t.ne(pad_idx).long().sum() for t in target_tokens]
target = merge(target_tokens)
target_lengths = torch.LongTensor(target_lengths)
prev_output_tokens = merge(target_tokens, move_eos_to_beginning=True)
ntokens = target_lengths.sum().item()
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": prev_output_tokens, #src_tokens,
"src_lengths": target_lengths, #src_lengths,
#"prev_output_tokens": prev_output_tokens,
#"target_lengths": target_lengths,
},
"target": target,
}
return batch
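# Rough sketch of the token layouts produced by make_sentence above (after stripping the
# source's trailing EOS):
#   prompt is None: [source] [target]
#   prefix=True:    [prompt] [source] [sep] [target]
#   prefix=False:   [source] [prompt] [target]
# merge() then pads each sequence with pad_idx, and prev_output_tokens is the same batch
# with the EOS moved to the front for teacher forcing.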
class LanguageModelPromptDataset(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
dictionary (~fairseq.data.Dictionary): vocabulary
tgt (torch.utils.data.Dataset, optional): target dataset to wrap
tgt_sizes (List[int], optional): target sentence lengths
        prefix (bool, optional): if True, place the prompt before the source and
            insert a separator token before the target; otherwise place the prompt
            between the source and the target
        prompt (LongTensor, optional): pre-tokenized prompt tokens to insert
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
max_source_length (int): max source text length
max_length (int): max text length
prompt_length (int): length of the prompt text
"""
def __init__(
self,
src,
src_sizes,
dictionary,
tgt,
tgt_sizes,
prefix=False,
prompt=None,
shuffle=True,
eos=None,
max_source_length=None,
max_length=None,
prompt_length=None,
):
self.src = src
self.tgt = tgt
self.prefix = prefix
self.seq_sep = None
self.prompt = prompt
self.dict = dictionary
self.shuffle = shuffle
self.eos = eos if eos is not None else dictionary.eos()
self.max_source_length = max_source_length
self.max_target_length = max_length - max_source_length - prompt_length
if self.prefix:
self.max_target_length -= 1
self.src_sizes = [min(s-1, self.max_source_length) for s in src_sizes]
self.tgt_sizes = [min(t, self.max_target_length) for t in tgt_sizes]
self.sizes = np.array([s+t for s,t in zip(self.src_sizes, self.tgt_sizes)])
self.buckets = None
def get_batch_shapes(self):
return self.buckets
def __getitem__(self, index):
src_item = self.src[index]
if src_item.size(0) - 1 > self.max_source_length:
src_item = src_item[:self.max_source_length + 1]
src_item[-2] = self.dict.index('...')
src_item[-1] = self.eos
tgt_item = self.tgt[index]
if tgt_item.size(0) > self.max_target_length:
tgt_item = tgt_item[:self.max_target_length]
tgt_item[-2] = self.dict.index('...')
tgt_item[-1] = self.eos
example = {
"id": index,
"source": src_item,
"target": tgt_item,
}
return example
def __len__(self):
return len(self.src)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
- `lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each target sentence of shape `(bsz)`
"""
res = collate(
samples,
pad_idx=self.dict.pad(),
eos_idx=self.dict.eos(),
prefix=self.prefix,
sep_idx=self.dict.sep_index,
prompt=self.prompt,
)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
sizes = self.sizes[indices]
return sizes
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
return indices[np.argsort(self.sizes[indices], kind="mergesort")]
@property
def supports_prefetch(self):
return getattr(self.src, "supports_prefetch", False) and (
getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
)
def prefetch(self, indices):
self.src.prefetch(indices)
self.tgt.prefetch(indices)
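# Hypothetical usage sketch (all names and sizes below are illustrative, not from this repo):
#
#   dataset = LanguageModelPromptDataset(
#       src, src_sizes, dictionary, tgt, tgt_sizes,
#       prefix=True,
#       prompt=prompt_tokens,          # assumed to be a pre-tokenized LongTensor
#       max_source_length=512, max_length=1024, prompt_length=len(prompt_tokens),
#   )
#   batch = dataset.collater([dataset[i] for i in range(8)])
#   # batch["net_input"]["src_tokens"] and batch["target"] are (8, padded_len) LongTensors.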
|
BioGPT/src/language_model_prompt_dataset.py/0
|
{
"file_path": "BioGPT/src/language_model_prompt_dataset.py",
"repo_id": "BioGPT",
"token_count": 3441
}
| 145 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#ifndef __LADDER_KERNEL_H__
#define __LADDER_KERNEL_H__
#include <cuda_fp16.h>
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800)
#define TVM_ENABLE_L2_PREFETCH 1
#else
#define TVM_ENABLE_L2_PREFETCH 0
#endif
#ifdef _WIN32
using uint = unsigned int;
using uchar = unsigned char;
using ushort = unsigned short;
using int64_t = long long;
using uint64_t = unsigned long long;
#else
#define uint unsigned int
#define uchar unsigned char
#define ushort unsigned short
#define int64_t long long
#define uint64_t unsigned long long
#endif
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800)
#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 1
#else
#define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 0
#endif
template <typename T1, typename T2, bool isSigned = false>
__device__ void decode_i4b_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i4s_to_f16(T1 *_i4s, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, true>(_i4s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i4u_to_f16(T1 *_i4u, T2 *B_local_decode, const int N = 8)
{
decode_i4b_to_f16<T1, T2, false>(_i4u, B_local_decode, N);
}
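// Worked example (illustrative): in the unsigned path, a packed nibble q = 0x5 becomes
// 0x6400 | 0x0005 = 0x6405 after the lop3, i.e. the fp16 encoding of 1024.0 + 5 = 1029.0;
// subtracting MEDIAN_NUM (0x6400 == 1024.0) yields 5.0. In the signed path MEDIAN_NUM is
// 0x6407 (== 1031.0), so the same nibble decodes to 5 - 7 = -2.0, mapping 0..15 onto -7..8.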
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_rescale(T1 *_i4s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4u_to_f16_scale_zeros_rescale(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_rescale<T1, T2, T3, false>(_i4u, B_local_decode, scale, zeros, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i4b_to_f16_scale_zeros_original(T1 *_i4s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x000f000f;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
// Minus 7 to scale the value to signed
static constexpr uint MEDIAN_NUM = isSigned ? 0x64076407 : 0x64006400;
uint const i4s = *reinterpret_cast<uint *>(_i4s);
#pragma unroll
// decode 2 elems at one time.
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i4s >> (4 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i4u_to_f16_scale_zeros_original(T1 *_i4u, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
decode_i4b_to_f16_scale_zeros_original<T1, T2, T3, false>(_i4u, B_local_decode, scale, zeros, N);
}
template <typename T1, typename T2, bool isSigned = false>
__device__ void decode_i2b_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be moved accordingly
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
}
}
template <typename T1, typename T2>
__device__ void decode_i2s_to_f16(T1 *_i2s, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, true>(_i2s, B_local_decode, N);
}
template <typename T1, typename T2>
__device__ void decode_i2u_to_f16(T1 *_i2u, T2 *B_local_decode, const int N = 8)
{
decode_i2b_to_f16<T1, T2, false>(_i2u, B_local_decode, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be moved accordingly
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2s_to_f16_scale(T1 *_i2s, T2 *B_local_decode, T3 *scale, const int N = 8)
{
decode_i2b_to_f16_scale<T1, T2, T3, true>(_i2s, B_local_decode, scale, N);
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale(T1 *_i2u, T2 *B_local_decode, T3 *scale, const int N = 8)
{
decode_i2b_to_f16_scale<T1, T2, T3, false>(_i2u, B_local_decode, scale, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_rescale(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be moved accordingly
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale_zeros_rescale(T1 *_i2u, T2 *B_local_decode, T3 *scale, T3 *zeros, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_rescale<T1, T2, T3, false>(_i2u, B_local_decode, scale, zeros, N);
}
template <typename T1, typename T2, typename T3, bool isSigned = false>
__device__ void decode_i2b_to_f16_scale_zeros_original(T1 *_i2s, T2 *B_local_decode, T3 *scale = nullptr, T3 *zeros = nullptr, const int N = 8)
{
uint *h = reinterpret_cast<uint *>(B_local_decode);
static constexpr uint immLut = (0xf0 & 0xcc) | 0xaa;
static constexpr uint BOTTOM_MASK = 0x00030003;
static constexpr uint FP16_TOP_MAGIC_NUM = 0x64006400;
static constexpr uint MEDIAN_NUM = isSigned ? 0x64016401 : 0x64006400;
int16_t const i2s_i16 = *reinterpret_cast<int16_t *>(_i2s);
// decode 2 elems at one time.
// interleave {e15,e13,e11,e9,e7,e5,e3,e1,e14,e12,e10,e8,e6,e4,e2,e0}
// only decode for {x,x,x,x,e7,e5,e3,e1,x,x,x,x,e6,e4,e2,e0}
    // otherwise the pointer of _i2s should be moved accordingly
int i2s = (i2s_i16 & 0x00ff);
i2s |= ((i2s_i16 & 0xff00) << 8);
#pragma unroll
for (int i = 0; i < (N / 2); i++)
{
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
: "=r"(h[i])
: "r"(i2s >> (2 * i)), "n"(BOTTOM_MASK), "n"(FP16_TOP_MAGIC_NUM), "n"(immLut));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(MEDIAN_NUM));
asm volatile("sub.f16x2 %0, %1, %2;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*zeros, *zeros)));
asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(h[i]) : "r"(h[i]), "r"(__pack_half2(*scale, *scale)), "r"(0));
}
}
template <typename T1, typename T2, typename T3>
__device__ void decode_i2u_to_f16_scale_zeros_original(T1 *_i2u, T2 *B_local_decode, T3 *scale, T3 *zeros, const int N = 8)
{
decode_i2b_to_f16_scale_zeros_original<T1, T2, T3, false>(_i2u, B_local_decode, scale, zeros, N);
}
// Pack two half values.
static inline __device__ __host__ unsigned
__pack_half2(const half x, const half y) {
unsigned v0 = *((unsigned short *)&x);
unsigned v1 = *((unsigned short *)&y);
return (v1 << 16) | v0;
}
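// For example, __pack_half2(__float2half(1.0f), __float2half(2.0f)) returns 0x40003C00:
// the low 16 bits hold 0x3C00 (1.0) and the high 16 bits hold 0x4000 (2.0).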
int ladder_gemm_fp16xint2_fp16(half *input_0, half *input_1, half *output, const int M, const int N, const int K, const int trans_a, const int trans_b, half *workspace_ptr);
#endif
|
BitBLAS/integration/bitdistiller/kenrel_output/ladder_kernel.h/0
|
{
"file_path": "BitBLAS/integration/bitdistiller/kenrel_output/ladder_kernel.h",
"repo_id": "BitBLAS",
"token_count": 6264
}
| 146 |
For now, please check out https://github.com/LeiWang1999/vllm-bitblas for details. The related pull request to the official vLLM repository is still under construction.
|
BitBLAS/integration/vLLM/README.md/0
|
{
"file_path": "BitBLAS/integration/vLLM/README.md",
"repo_id": "BitBLAS",
"token_count": 41
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from tvm.target import Target
from .arch_base import TileDevice
# For the LLVM backend, we do not provide detailed CPU information,
# as the LLVM backend does not require tuning; this class just maintains consistency.
class CPU(TileDevice):
def __init__(self, target: Target):
self.target = target
device = tvm.runtime.cpu(0)
if not device.exist:
raise RuntimeError("Cannot find cpu device 0.")
self.device: tvm.runtime.Device = device
self.platform: str = "CPU"
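if __name__ == "__main__":
    # Minimal sanity-check sketch (assumes an LLVM-enabled TVM build on the host).
    arch = CPU(Target("llvm"))
    print(arch.platform, arch.device)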
|
BitBLAS/python/bitblas/base/roller/arch/cpu.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/arch/cpu.py",
"repo_id": "BitBLAS",
"token_count": 202
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .operator import (
global_operator_cache, # noqa: F401
load_global_ops_cache, # noqa: F401
get_database_path, # noqa: F401
set_database_path, # noqa: F401
)
|
BitBLAS/python/bitblas/cache/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/cache/__init__.py",
"repo_id": "BitBLAS",
"token_count": 96
}
| 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""A GEMM schedule rule for GPU operators."""
from typing import Literal, Optional
from tvm import DataType, tir
from tvm.target import Target
from ..base.roller.rasterization import NoRasterization
from ..base import analysis
from .base import GPUScheduleRule
from .matmul_analysis import (
auto_inline_consumer_chain,
auto_inline_producers,
get_index_map,
get_reduction_blocks,
normalize_to_matmul,
)
class MatmulTensorizationWMMA(GPUScheduleRule):
"""
The schedule rule for float16 tensor core matmul computation.
func with attr 'dlight.do_not_tensorize' will not be tensorized.
"""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Optional[tir.Schedule]:
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
block_stmt = sch.get(main_block)
index_maps = get_index_map(block_stmt)
if index_maps is None:
return None
matmul_index_map, a_index_map, b_index_map, c_index_map = index_maps
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
block_m = 128
block_n = 128
block_k = 32
# tensor core intrinsic size
micro_size_m = 16
micro_size_n = 16
micro_size_k = 16
thread_z = 2
thread_y = 2
warp_size = 32
vector_size = 8
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]
block = sch.reindex(main_block, ("read", 0))
sch.transform_layout(block, ("write", 0), a_index_map)
block = sch.reindex(main_block, ("read", 1))
sch.transform_layout(block, ("write", 0), b_index_map)
block = sch.reindex(main_block, ("write", 0))
sch.transform_layout(block, ("read", 0), c_index_map)
sch.transform_block_layout(main_block, matmul_index_map)
# Step 2. Padding for dynamic shape kernels
# # Step 2.1 Swizzle for l2, for better performance on inputs exceeding l2 size
# # Get input shape
batch, i, j, k = sch.get_loops(main_block)
# input_b, input_m, input_n, input_k = [sch.get(loop).extent for loop in [batch, i, j, k]]
# # Get input/output dtype
dtype_a, dtype_b = [DataType(region.buffer.dtype) for region in sch.get(main_block).reads]
dtype_c = DataType(sch.get(main_block).writes[0].buffer.dtype)
# dtype_a_bytes, dtype_b_bytes = [math.ceil(d.bits / 8) for d in [dtype_a, dtype_b]]
# # Get l2 size
# l2_size = target.l2_cache_size_bytes
# # Analyse swizzle factor
# def get_swizzle_factor(l2_size, input_k, dtype_bytes, input_spatial, block_size):
# if l2_size != 0 and isinstance(input_k, (int, tir.IntImm)):
# # div by 3: suppose the two inputs and the output uses the same amount of l2
# swizzle_factor = l2_size / 3 / int(input_k) / dtype_bytes / block_size
# # optimization: try find the best swizzle factor (aka the least additional padding)
# if isinstance(input_spatial, (int, tir.IntImm)):
# block_cnt = math.ceil(int(input_spatial) / block_size)
# swizzle_factor = math.ceil(block_cnt / math.ceil(block_cnt / swizzle_factor))
# else:
# swizzle_factor = math.floor(swizzle_factor)
# return [None, swizzle_factor]
# else:
# return [4, None]
# swizzle_factor_m = get_swizzle_factor(l2_size, input_k, dtype_a_bytes, input_m, block_m)
# swizzle_factor_n = get_swizzle_factor(l2_size, input_k, dtype_b_bytes, input_n, block_n)
swizzle_factor_m = [4, None]
swizzle_factor_n = [4, None]
# Step 2.2 Add padding
sch.pad_einsum(
main_block,
[
1,
(swizzle_factor_m[0] or swizzle_factor_m[1]) * block_m,
(swizzle_factor_n[0] or swizzle_factor_n[1]) * block_n,
block_k,
],
)
# Step 3. Reorder loops for tiling
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_m])
j, j_inner = sch.split(j, factors=[None, micro_size_n])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = main_block
block_outer = sch.blockize(i_inner)
# split factors for i, j, and k
in_wrap_block_cnt_m = block_m // thread_z // micro_size_m
in_wrap_block_cnt_n = block_n // thread_y // micro_size_n
in_wrap_block_cnt_k = block_k // micro_size_k
i_factors = swizzle_factor_m + [thread_z, in_wrap_block_cnt_m]
j_factors = swizzle_factor_n + [thread_y, in_wrap_block_cnt_n]
k_factors = [None, in_wrap_block_cnt_k]
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, factors=k_factors)
sch.reorder(i0, j0, i1, j1, k0, i2, j2, k1, i3, j3)
block_axis = sch.fuse(batch, i0, j0, i1, j1)
sch.bind(block_axis, "blockIdx.x")
sch.bind(i2, "threadIdx.z")
sch.bind(j2, "threadIdx.y")
# Step 4. Read to/write from shared mem, and from/to wmma fragments
def fetch_input(block_outer, read_buffer_idx, tensor_name: Literal["A", "B"], wmma_name):
block_read = sch.cache_read(block_outer, read_buffer_idx, "shared.dyn")
sch.compute_at(block_read, k0)
fused = sch.fuse(*sch.get_loops(block_read)[-2:])
f0, f1, f2, f3, f4 = sch.split(fused,
[None, thread_z, thread_y, warp_size, vector_size])
sch.bind(f1, "threadIdx.z")
sch.bind(f2, "threadIdx.y")
sch.bind(f3, "threadIdx.x")
sch.vectorize(f4)
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=8)
auto_inline_producers(sch, block_read)
wmma_read = sch.cache_read(block_outer, read_buffer_idx, wmma_name)
sch.compute_at(wmma_read, k1)
micro_size_spatial = micro_size_m if tensor_name == "A" else micro_size_n
v0, v1 = sch.get_loops(wmma_read)[-2:]
sch.split(v0, factors=[None, micro_size_spatial])
return wmma_read
        wmma_read_a = fetch_input(block_outer, 0, "A", "wmma.matrix_a")
        wmma_read_b = fetch_input(block_outer, 1, "B", "wmma.matrix_b")
def store_output(block_outer, write_buffer_idx, wmma_name):
block_write = sch.cache_write(block_outer, write_buffer_idx, "shared.dyn")
sch.reverse_compute_at(block_write, block_axis)
fused = sch.fuse(*sch.get_loops(block_write)[-2:])
f0, f1, f2, f3, f4 = sch.split(fused,
[None, thread_z, thread_y, warp_size, vector_size])
sch.bind(f1, "threadIdx.z")
sch.bind(f2, "threadIdx.y")
sch.bind(f3, "threadIdx.x")
sch.vectorize(f4)
# sch.storage_align(block_write, 0, axis=-2, factor=128, offset=16)
auto_inline_consumer_chain(sch, block_write)
wmma_store = sch.cache_write(block_outer, write_buffer_idx, wmma_name)
v0, v1 = sch.get_loops(wmma_store)[-2:]
v00, v01, v02 = sch.split(v0, factors=[thread_z, None, micro_size_m])
v10, v11, v12 = sch.split(v1, factors=[thread_y, None, micro_size_n])
sch.reorder(v00, v10, v01, v11, v02, v12)
sch.bind(v00, "threadIdx.z")
sch.bind(v10, "threadIdx.y")
return wmma_store
wmma_store = store_output(block_outer, 0, "wmma.accumulator")
block_init = sch.decompose_reduction(block_outer, k0)
block_init_inner = sch.get_child_blocks(block_init)[0]
# unroll k
sch.unroll(k0)
# Step 5. Schedule tensor core computation
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_wmma_intrin_group,)
intrin_group = get_wmma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
in_dtype=str(dtype_a),
out_dtype=str(dtype_c),
trans_b=True,
)
sch.tensorize(sch.get_loops(block_init_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(wmma_read_a)[-2], intrin_group["load_a"])
sch.tensorize(sch.get_loops(wmma_read_b)[-2], intrin_group["load_b"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
sch.tensorize(sch.get_loops(wmma_store)[-2], intrin_group["store"])
return sch
class MatmulInt8Tensorization(GPUScheduleRule):
"""
The schedule rule for int8 tensor core matmul computation.
func with attr 'dlight.do_not_tensorize' will not be tensorized.
"""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Optional[tir.Schedule]:
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_wmma_intrin_group,)
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
block_stmt = sch.get(main_block)
index_maps = get_index_map(block_stmt)
if index_maps is None:
return None
matmul_index_map, a_index_map, b_index_map, c_index_map = index_maps
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# tensor core intrinsic size
micro_size_x = 16
micro_size_y = 16
micro_size_k = 16
warp_size = 32
vector_size = 4
i_factors, j_factors, k_factors = (
[None, 1, 4, 2],
[1, None, 4, 2],
[None, 1],
)
num_ty = i_factors[2] * j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]
block = sch.reindex(main_block, ("read", 0))
sch.transform_layout(block, ("write", 0), a_index_map)
block = sch.reindex(main_block, ("read", 1))
sch.transform_layout(block, ("write", 0), b_index_map)
block = sch.reindex(main_block, ("write", 0))
sch.transform_layout(block, ("read", 0), c_index_map)
sch.transform_block_layout(main_block, matmul_index_map)
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.annotate(k0, "software_pipeline_order", [0, 3, 1, 4, 5, 2, 6])
sch.annotate(k0, "software_pipeline_stage", [0, 0, 0, 0, 0, 1, 1])
sch.annotate(k1, "software_pipeline_order", [0, 1, 2])
sch.annotate(k1, "software_pipeline_stage", [0, 0, 1])
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim):
block_read = sch.cache_read(block, idx, "shared.dyn")
sch.compute_at(block_read, k0)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vector_size])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
sch.storage_align(block_read, 0, axis=-2, factor=32, offset=16)
sch.annotate(block_read, "tir.manifest_shared_memory_local_stage", 1)
sch.annotate(block_read, "double_buffer_scope", 0)
return block_read
a_g2s = fetch_to_shared(block_outer, 0, 2)
b_g2s = fetch_to_shared(block_outer, 1, 2)
auto_inline_producers(sch, a_g2s)
auto_inline_producers(sch, b_g2s)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "wmma.matrix_a")
B_mat = sch.cache_read(block_outer, 1, "wmma.matrix_b")
sch.compute_at(A_mat, k1)
sch.compute_at(B_mat, k1)
# create write cache to store matrix from wmma fragments to shared memory and global memory
accumulator_shared_to_global = sch.cache_write(block_outer, 0, "shared.dyn")
sch.storage_align(accumulator_shared_to_global, 0, -2, 16, 4)
store = sch.cache_write(block_outer, 0, "wmma.accumulator")
sch.reverse_compute_at(store, thread_idy)
sch.reverse_compute_at(accumulator_shared_to_global, thread_idy)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
intrin_group = get_wmma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
in_dtype="int8",
out_dtype="int32",
trans_b=True,
)
try:
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_b"])
except Exception: # pylint: disable=bare-except
return None
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
try:
tensorize_init_store_compute()
except Exception: # pylint: disable=bare-except
return None
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-2:])
_, f1, f2 = sch.split(fused, factors=[None, warp_size, vector_size])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
return sch
class MatmulTensorizationLegacy(GPUScheduleRule):
"""
The schedule rule for float16 tensor core matmul computation.
func with attr 'dlight.do_not_tensorize' will not be tensorized.
"""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Optional[tir.Schedule]:
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_wmma_intrin_group,)
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
block_stmt = sch.get(main_block)
index_maps = get_index_map(block_stmt)
if index_maps is None:
return None
matmul_index_map, a_index_map, b_index_map, c_index_map = index_maps
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# tensor core intrinsic size
micro_size_x = 16
micro_size_y = 16
micro_size_k = 16
warp_size = 32
vector_size = 4
i_factors, j_factors, k_factors = (
[None, 1, 4, 2],
[1, None, 4, 2],
[None, 4],
)
num_ty = i_factors[2] * j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]
block = sch.reindex(main_block, ("read", 0))
sch.transform_layout(block, ("write", 0), a_index_map)
block = sch.reindex(main_block, ("read", 1))
sch.transform_layout(block, ("write", 0), b_index_map)
block = sch.reindex(main_block, ("write", 0))
sch.transform_layout(block, ("read", 0), c_index_map)
sch.transform_block_layout(main_block, matmul_index_map)
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.annotate(k0, "software_pipeline_order", [0, 3, 1, 4, 5, 2, 6])
sch.annotate(k0, "software_pipeline_stage", [0, 0, 0, 0, 0, 1, 1])
sch.annotate(k1, "software_pipeline_order", [0, 1, 2])
sch.annotate(k1, "software_pipeline_stage", [0, 0, 1])
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim):
block_read = sch.cache_read(block, idx, "shared.dyn")
sch.compute_at(block_read, k0)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vector_size])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=8)
sch.annotate(block_read, "tir.manifest_shared_memory_local_stage", 1)
sch.annotate(block_read, "double_buffer_scope", 0)
return block_read
a_g2s = fetch_to_shared(block_outer, 0, 2)
b_g2s = fetch_to_shared(block_outer, 1, 2)
auto_inline_producers(sch, a_g2s)
auto_inline_producers(sch, b_g2s)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "wmma.matrix_a")
B_mat = sch.cache_read(block_outer, 1, "wmma.matrix_b")
sch.compute_at(A_mat, k1)
sch.compute_at(B_mat, k1)
# create write cache to store matrix from wmma fragments to shared memory and global memory
accumulator_shared_to_global = sch.cache_write(block_outer, 0, "shared.dyn")
sch.storage_align(accumulator_shared_to_global, 0, -2, 16, 4)
store = sch.cache_write(block_outer, 0, "wmma.accumulator")
sch.reverse_compute_at(store, thread_idy)
sch.reverse_compute_at(accumulator_shared_to_global, thread_idy)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
intrin_group = get_wmma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
in_dtype="float16",
out_dtype="float32",
trans_b=True,
)
try:
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_b"])
except Exception: # pylint: disable=bare-except
return None
# Try to tensorize the init, store and compute block with f16 or f32 intrinsics
tensorize_success: bool = False
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
try:
tensorize_init_store_compute()
tensorize_success = True
except Exception: # pylint: disable=bare-except
intrin_group = get_wmma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
in_dtype="float16",
out_dtype="float16",
trans_b=True,
)
if not tensorize_success:
try:
tensorize_init_store_compute()
tensorize_success = True
except Exception: # pylint: disable=bare-except
return None
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-2:])
_, f1, f2 = sch.split(fused, factors=[None, warp_size, vector_size])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
return sch if tensorize_success else None
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> Optional[tir.Schedule]:
from tvm.tir.tensor_intrin.cuda import ( # pylint: disable=import-outside-toplevel
get_wmma_intrin_group,)
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
if func.attrs is not None and "dlight.do_not_tensorize" in func.attrs.keys():
return None
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
# Start Schedule
# Step 0. Get schedule config.
# NOTE: we can analyze the config by the hardware spec in the future
# tensor core intrinsic size
intrin_info = config.intrin_info
warp_row_tiles = config.warp[0]
warp_col_tiles = config.warp[1]
block_row_warps = config.block[0] // warp_row_tiles
block_col_warps = config.block[1] // warp_col_tiles
stage = config.pipeline_stage
use_async = config.use_async
chunk = config.rstep[0]
micro_size_x = 16
micro_size_y = 16
micro_size_k = 16
warp_size = 32
i_factors, j_factors, k_factors = (
[None, 1, block_row_warps, warp_row_tiles // micro_size_x],
[1, None, block_col_warps, warp_col_tiles // micro_size_y],
[None, chunk // micro_size_k],
)
num_ty = i_factors[2] * j_factors[2]
x_pad_factor = i_factors[2] * i_factors[3]
y_pad_factor = j_factors[2] * j_factors[3]
k_pad_factor = k_factors[1]
# Step 1. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]/B[S, K, J]
if not (func.attrs is not None and "dlight.tensorcore_prenormlized" in func.attrs.keys()):
sch = normalize_to_matmul(sch, main_block, ["a", "a", "a"])
# Step 2. Padding for dynamic shape kernels
sch.pad_einsum(
main_block,
[
1,
micro_size_x * x_pad_factor,
micro_size_y * y_pad_factor,
micro_size_k * k_pad_factor,
],
)
# Step 3. Schedule matmul to use tensor core
block = main_block
batch, i, j, k = sch.get_loops(block)
# inner loops for tensor core computation
i, i_inner = sch.split(i, factors=[None, micro_size_x])
j, j_inner = sch.split(j, factors=[None, micro_size_y])
k, k_inner = sch.split(k, factors=[None, micro_size_k])
sch.reorder(i, j, k, i_inner, j_inner, k_inner)
block_inner = block
block_outer = sch.blockize(i_inner)
i0, i1, i2, i3 = sch.split(i, factors=i_factors)
j0, j1, j2, j3 = sch.split(j, factors=j_factors)
k0, k1 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
        # plan rasterization
if (not isinstance(config.rasterization_plan, NoRasterization) and
sch.get(batch).extent.value == 1):
device_func, invoke_func = config.rasterization_plan.get_code()
factor = config.rasterization_plan.panel_width_
# TODO(lei): this is a trick for rasterization implementation
# wait for https://github.com/apache/tvm/pull/16113 to be merged
# require a solution for general block rasterization
factor = 8 # should be divisible by block_idy
if sch.get(block_idy).extent.value % factor == 0:
block_k, block_idy = sch.split(block_idy, factors=[None, factor])
sch.bind(block_k, "blockIdx.z")
else:
sch.bind(batch, "blockIdx.z")
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim, vec_len, dtype="float16"):
block_read = sch.cache_read(block, idx, "shared.dyn")
sch.compute_at(block_read, k0)
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vec_len])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
offset: int = 0
if dtype == "float16":
offset = 8
elif dtype == "int8":
offset = 16
# todo(lei): the pad value should be varied according to the data type
sch.storage_align(block_read, 0, axis=-2, factor=16, offset=offset)
return block_read
a_g2s = fetch_to_shared(
block_outer,
0,
2,
vec_len=list(config.vectorize.values())[0],
dtype=intrin_info.in_dtype,
)
b_g2s = fetch_to_shared(
block_outer,
1,
2,
vec_len=list(config.vectorize.values())[1],
dtype=intrin_info.in_dtype,
)
auto_inline_producers(sch, a_g2s)
auto_inline_producers(sch, b_g2s)
# create read cache to load matrix from shared memory to wmma fragments
A_mat = sch.cache_read(block_outer, 0, "wmma.matrix_a")
B_mat = sch.cache_read(block_outer, 1, "wmma.matrix_b")
sch.compute_at(A_mat, k1)
sch.compute_at(B_mat, k1)
# create write cache to store matrix from wmma fragments to shared memory and global memory
accumulator_shared_to_global = sch.cache_write(block_outer, 0, "shared.dyn")
sch.storage_align(accumulator_shared_to_global, 0, -2, 16, 4)
store = sch.cache_write(block_outer, 0, "wmma.accumulator")
sch.reverse_compute_at(store, thread_idy)
sch.reverse_compute_at(accumulator_shared_to_global, thread_idy)
# split the store loop to match hardware intrinsic pattern
i, j = sch.get_loops(store)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
block_init_c = sch.decompose_reduction(block_outer, k0)
block_init_c_inner = sch.get_child_blocks(block_init_c)[0]
# Tensorization by hardware intrinsics
intrin_group = get_wmma_intrin_group(
load_scope="shared.dyn",
store_scope="shared.dyn",
in_dtype=intrin_info.in_dtype,
out_dtype=intrin_info.out_dtype,
trans_b=intrin_info.trans_b,
)
try:
i, j = sch.get_loops(A_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_a"])
i, j = sch.get_loops(B_mat)[-2:]
i0, i1 = sch.split(i, factors=[None, 16])
j0, j1 = sch.split(j, factors=[None, 16])
sch.reorder(i0, j0, i1, j1)
sch.unroll(i0)
sch.unroll(j0)
sch.tensorize(i1, intrin_group["load_b"])
except Exception: # pylint: disable=bare-except
return None
# Try to tensorize the init, store and compute block with f16 or f32 intrinsics
tensorize_success: bool = False
def tensorize_init_store_compute():
sch.tensorize(sch.get_loops(block_init_c_inner)[-2], intrin_group["init"])
sch.tensorize(sch.get_loops(store)[-2], intrin_group["store"])
sch.tensorize(sch.get_loops(block_inner)[-3], intrin_group["compute"])
try:
tensorize_init_store_compute()
tensorize_success = True
except Exception: # pylint: disable=bare-except
return None
auto_inline_consumer_chain(sch, accumulator_shared_to_global)
fused = sch.fuse(*sch.get_loops(accumulator_shared_to_global)[-2:])
_, f1, f2 = sch.split(
fused, factors=[None, warp_size, max(list(config.vectorize.values()))])
sch.bind(f1, "threadIdx.x")
sch.vectorize(f2)
if stage > 1:
sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, stage - 1])
sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
if use_async:
sch.annotate(k0, "software_pipeline_async_stages", [0])
return sch if tensorize_success else None
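# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the split/reorder/
# blockize pattern used above, shown on a toy TensorIR matmul. The workload,
# the block name "C" and the 16x16x16 tile sizes are assumptions made only
# for this example.
#
#   import tvm
#   from tvm.script import tir as T
#
#   @T.prim_func
#   def toy_matmul(a: T.handle, b: T.handle, c: T.handle):
#       A = T.match_buffer(a, (128, 128), "float16")
#       B = T.match_buffer(b, (128, 128), "float16")
#       C = T.match_buffer(c, (128, 128), "float16")
#       for i, j, k in T.grid(128, 128, 128):
#           with T.block("C"):
#               vi, vj, vk = T.axis.remap("SSR", [i, j, k])
#               with T.init():
#                   C[vi, vj] = T.float16(0)
#               C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
#
#   sch = tvm.tir.Schedule(toy_matmul)
#   blk = sch.get_block("C")
#   i, j, k = sch.get_loops(blk)
#   i, ii = sch.split(i, factors=[None, 16])   # 16x16x16 wmma-sized tile
#   j, jj = sch.split(j, factors=[None, 16])
#   k, kk = sch.split(k, factors=[None, 16])
#   sch.reorder(i, j, k, ii, jj, kk)
#   block_outer = sch.blockize(ii)             # outer block now iterates over tiles
# ---------------------------------------------------------------------------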
|
BitBLAS/python/bitblas/gpu/matmul_wmma.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/matmul_wmma.py",
"repo_id": "BitBLAS",
"token_count": 17200
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from tvm.target import Target
from typing import Literal, Union
from .operator import Operator
from .impl.lop3_permutate_impl import select_implementation
from dataclasses import dataclass
import torch
@dataclass(frozen=True)
class LOP3PermutateConfig:
M: int
N: int
datatype: Literal["float16", "int8"] = "float16"
storage_dtype: Literal["int8", "uint8", "int32", "uint32"] = "int32"
dequantize_bits: int = 4
class LOP3Permutate(Operator):
def __init__(
self,
config: LOP3PermutateConfig,
name: str = "permutate",
target: Union[str, Target] = "llvm", # assume to do permutation on cpu.
):
        # consider wrapping the arguments into a MatmulConfig-style dataclass
super().__init__(name, config, target)
if target.kind.name != "llvm":
raise ValueError("Currently only support llvm target for Permutation")
self.target = target
self._build_runtime_module(target)
def _select_implementation(self):
return select_implementation(
M=self.M,
N=self.N,
datatype=self.datatype,
dequantize_bits=self.dequantize_bits,
)
def forward(self, weight, res):
# reinterpret the input tensor to int32 format
args = [arg.view(torch.int32) for arg in [weight, res]]
self.torch_func(*args)
return args[-1].view(weight.dtype)
@property
def M(self):
return self.config.M
@property
def N(self):
return self.config.N
@property
def datatype(self):
return self.config.datatype
@property
def storage_dtype(self):
return self.config.storage_dtype
@property
def dequantize_bits(self):
return self.config.dequantize_bits
__all__ = ["LOP3Permutate", "LOP3PermutateConfig"]
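# --- Illustrative usage sketch (not part of the original module). The weight
# shape and the int32-packed 4-bit layout below are assumptions made only for
# this example. ---
def _example_lop3_permutate():  # pragma: no cover
    cfg = LOP3PermutateConfig(M=1024, N=1024, datatype="float16", dequantize_bits=4)
    op = LOP3Permutate(config=cfg, target=Target("llvm"))
    # 4-bit weights packed 8-per-int32 along N (assumed layout)
    packed = torch.randint(0, 127, (1024, 1024 // 8), dtype=torch.int32)
    out = torch.empty_like(packed)
    return op.forward(packed, out)  # permuted weight, in the original storage dtype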
|
BitBLAS/python/bitblas/ops/lop3_permutate.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/lop3_permutate.py",
"repo_id": "BitBLAS",
"token_count": 814
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from typing import Union
from enum import IntEnum
import numpy as np
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
from math import prod
from tvm.relay import TensorType
from tvm._ffi.base import _LIB, c_str
from tvm._ffi._ctypes.types import TVMValue, check_call
from tvm._ffi.runtime_ctypes import (
TVMArrayHandle,)
import ctypes
TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_c_str_dltensor = c_str("dltensor")
_c_str_used_dltensor = c_str("used_dltensor")
def get_values_from_torch_tensors(tensors, num_args):
values = (TVMValue * num_args)()
dlpack_tensors = [to_dlpack(torch_tensor) for torch_tensor in tensors]
for i, dltensor in enumerate(dlpack_tensors):
dltensor = ctypes.py_object(dltensor)
if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
# enforce type to make sure it works for all ctypes
ptr = ctypes.cast(ptr, ctypes.c_void_p)
handle = TVMArrayHandle()
check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
# ndarray = tvm.runtime.ndarray._make_array(handle, False, False)
ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
values[i].v_handle = ctypes.cast(handle, ctypes.c_void_p)
else:
raise ValueError("Invalid DLTensor")
return values
class TensorSupplyType(IntEnum):
Integer = 1
Uniform = 2
Normal = 3
Randn = 4
Zero = 5
One = 6
def get_tensor_supply(supply_type: TensorSupplyType, opt_shapes: dict = None):
def var_wrapper(v, opt_shapes):
if isinstance(v, tvm.tir.Var):
assert opt_shapes
assert v.name in opt_shapes
return opt_shapes[v.name]
elif isinstance(v, tvm.tir.IntImm):
return v.value
else:
raise RuntimeError("Not supported type: ", type(v))
def get_tensor(tensor: TensorType) -> torch.Tensor:
dtype = torch.__getattribute__(str(tensor.dtype))
device = torch.cuda.current_device()
shape = [var_wrapper(i, opt_shapes) for i in tensor.shape]
if supply_type == TensorSupplyType.Integer:
return torch.randint(low=-2, high=3, size=shape, device=device, dtype=dtype)
elif supply_type == TensorSupplyType.Uniform:
return torch.empty(*shape, device=device, dtype=dtype).uniform_(-1.0, 1.0)
elif supply_type == TensorSupplyType.Normal:
return torch.empty(*shape, device=device, dtype=dtype).normal_(-1.0, 1.0)
elif supply_type == TensorSupplyType.Randn:
return torch.randn(*shape, device=device).to(dtype)
elif supply_type == TensorSupplyType.Zero:
return torch.zeros(*shape, device=device, dtype=dtype)
elif supply_type == TensorSupplyType.One:
return torch.ones(*shape, device=device, dtype=dtype)
else:
raise NotImplementedError(supply_type)
return get_tensor
def tvm_tensor_to_torch(tensor: Union[tvm.te.Tensor, tvm.nd.NDArray]):
if isinstance(tensor, tvm.te.Tensor):
return torch.from_numpy(tensor.numpy())
elif isinstance(tensor, tvm.nd.NDArray):
return from_dlpack(tensor)
else:
raise RuntimeError("Not supported type: ", type(tensor))
def lazy_tvm_tensor_to_torch(tensor: Union[tvm.te.Tensor, tvm.nd.NDArray]):
# It additionally needs the ctypes type as torch type
def as_tensor(address, shape, elems_inbytes, torch_type):
arr = (ctypes.c_int8 * elems_inbytes).from_address(
address)
return torch.frombuffer(arr, dtype=torch_type).view(*shape)
if isinstance(tensor, tvm.nd.NDArray):
np_array = tensor.asnumpy()
shape = np_array.shape
dtype = np_array.dtype
torch_dtype = getattr(torch, str(dtype))
num_elems_inbytes = prod(shape) * np_array.itemsize
data_ptr = np_array.ctypes.data
tensor = as_tensor(data_ptr, shape, num_elems_inbytes, torch_dtype)
return tensor
else:
raise RuntimeError("Not supported type: ", type(tensor))
def lazy_torch_to_tvm_tensor(tensor):
# It additionally needs the ctypes type as torch type
def as_tensor(address, shape, elems_inbytes, numpy_type):
arr = (ctypes.c_int8 * elems_inbytes).from_address(
address)
return np.frombuffer(arr, dtype=numpy_type).reshape(shape)
if isinstance(tensor, torch.Tensor):
data_ptr = tensor.data_ptr()
shape = tensor.shape
torch_dtype = tensor.dtype
numpy_dtype = str(torch_dtype).replace("torch.", "")
num_elems_inbytes = prod(shape) * tensor.itemsize
np_tensor = as_tensor(data_ptr, shape, num_elems_inbytes, numpy_dtype)
tvm_tensor = tvm.nd.array(np_tensor)
return tvm_tensor
else:
raise RuntimeError("Not supported type: ", type(tensor))
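# --- Illustrative usage sketch (not part of the original module) -----------
def _example_roundtrip():  # pragma: no cover
    """Convert a torch tensor to a TVM NDArray and back again."""
    t = torch.arange(16, dtype=torch.float32).reshape(4, 4)
    nd = lazy_torch_to_tvm_tensor(t)   # torch -> tvm.nd.NDArray via the raw data pointer
    back = tvm_tensor_to_torch(nd)     # tvm.nd.NDArray -> torch via DLPack
    return nd, back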
|
BitBLAS/python/bitblas/utils/tensor_adapter.py/0
|
{
"file_path": "BitBLAS/python/bitblas/utils/tensor_adapter.py",
"repo_id": "BitBLAS",
"token_count": 2316
}
| 152 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pytest
import bitblas
from bitblas import MatmulConfig, Matmul
import logging
from bitblas import set_log_level
set_log_level(logging.DEBUG)
@pytest.mark.parametrize(
"M,N,K,A_dtype,W_dtype,accum_dtype,out_dtype,layout,with_bias,group_size,with_scaling,with_zeros,zeros_mode",
[
(1, 1024, 1024, "e4m3_float8", "e4m3_float8", "float32", "float32", "nt", None, None, None, None,
None),
(1024, 1024, 1024, "e4m3_float8", "e4m3_float8", "float32", "float32", "nt", None, None, None, None,
None),
(1, 1024, 1024, "e5m2_float8", "e5m2_float8", "float32", "float32", "nt", None, None, None, None,
None),
(1024, 1024, 1024, "e5m2_float8", "e5m2_float8", "float32", "float32", "nt", None, None, None, None,
None),
],
)
def test_matmul_torch_forward(M, N, K, A_dtype, W_dtype, accum_dtype, out_dtype, layout, with_bias,
group_size, with_scaling, with_zeros, zeros_mode):
import torch
torch.random.manual_seed(0)
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
A_dtype=A_dtype,
W_dtype=W_dtype,
accum_dtype=accum_dtype,
out_dtype=out_dtype,
layout=layout,
with_bias=with_bias,
group_size=group_size,
with_scaling=with_scaling,
with_zeros=with_zeros,
zeros_mode=zeros_mode,
)
matmul = Matmul(config=matmul_config, enable_tuning=True)
input_shape = (M, K)
weight_shape = (N, K) if layout == "nt" else (K, N)
def map_torch_type(intype):
typemap = {
'e4m3_float8': torch.float8_e4m3fn,
'e5m2_float8': torch.float8_e5m2,
}
if intype in typemap:
return typemap[intype]
else:
return getattr(torch, intype)
numpytype_a = map_torch_type(A_dtype)
numpytype_b = map_torch_type(W_dtype)
numpytype_c = map_torch_type(out_dtype)
torch_a = torch.rand(M*K).uniform_(-5, 5).reshape(input_shape).type(numpytype_a).cuda()
torch_b = torch.rand(N*K).uniform_(-5, 5).reshape(weight_shape).type(numpytype_b).cuda()
ref_out = torch.matmul(torch_a.to(torch.float32), torch_b.t().to(torch.float32)) if layout == "nt" else torch.matmul(torch_a.to(torch.float32), torch_b.to(torch.float32))
ref_out = ref_out.to(numpytype_c)
print("torch_ref_out", ref_out)
new_torch_b = matmul.transform_weight(torch_b)
bitblas_out = matmul(torch_a, new_torch_b)
print("bitblas_out", bitblas_out)
# fmt: on
if __name__ == "__main__":
bitblas.testing.main()
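# --- Illustrative sketch (not part of the original test): a tolerance-based
# check that could follow the prints above. The fp8 matmuls here accumulate in
# float32, so the loose tolerances are assumptions. ---
def _assert_close_fp8(bitblas_out, ref_out):  # pragma: no cover
    import torch
    torch.testing.assert_close(
        bitblas_out.to(torch.float32), ref_out.to(torch.float32), rtol=1e-1, atol=1e-1)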
|
BitBLAS/testing/python/operators/test_general_matmul_fp8.py/0
|
{
"file_path": "BitBLAS/testing/python/operators/test_general_matmul_fp8.py",
"repo_id": "BitBLAS",
"token_count": 1367
}
| 153 |
date ; hostname ; pwd
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
EXP_LR_ARRAY=(1e-5 2e-5 1e-5 2e-5)
EXP_BS_ARRAY=(16 16 32 32)
EXP_TN_ARRAY1=(cola mrpc sst2 qqp)
EXP_TN_ARRAY2=(stsb rte qnli mnli)
EXP_CUDA_INDEX1=(0 1 2 3)
EXP_CUDA_INDEX2=(4 5 6 7)
EXP_MASTER_PORT1=(19800 19801 19802 19803)
EXP_MASTER_PORT2=(19804 19805 19806 19807)
for task in {0..3}
do
task1=${EXP_TN_ARRAY1[$task]}
task2=${EXP_TN_ARRAY2[$task]}
for i in {0..3}
do
EXP_LR=${EXP_LR_ARRAY[$i]}
EXP_BS=${EXP_BS_ARRAY[$i]}
EXP_PGB=$EXP_BS
echo ${EXP_CUDA_INDEX1[$i]}, ${EXP_MASTER_PORT1[$i]}, $NODE_RANK, $EXP_BS, $EXP_LR, $task1
echo ${EXP_CUDA_INDEX2[$i]}, ${EXP_MASTER_PORT2[$i]}, $NODE_RANK, $EXP_BS, $EXP_LR, $task2
RUN_NAME=""$EXP_BS"_"$EXP_LR""
CUDA_VISIBLE_DEVICES=${EXP_CUDA_INDEX1[$i]} MASTER_PORT=${EXP_MASTER_PORT1[$i]} python run_glue.py with run_name=$RUN_NAME learning_rate=$EXP_LR batch_size=$EXP_BS per_gpu_batchsize=$EXP_PGB group_name=$task1 >/dev/null &
CUDA_VISIBLE_DEVICES=${EXP_CUDA_INDEX2[$i]} MASTER_PORT=${EXP_MASTER_PORT2[$i]} python run_glue.py with run_name=$RUN_NAME learning_rate=$EXP_LR batch_size=$EXP_BS per_gpu_batchsize=$EXP_PGB group_name=$task2 >/dev/null &
done
wait
for i in {0..3}
do
EXP_LR=${EXP_LR_ARRAY[$i]}
EXP_BS=${EXP_BS_ARRAY[$i]}
EXP_PGB=$EXP_BS
echo ${EXP_CUDA_INDEX1[$i]}, ${EXP_MASTER_PORT1[$i]}, $NODE_RANK, $EXP_BS, $EXP_LR, $task1
echo ${EXP_CUDA_INDEX2[$i]}, ${EXP_MASTER_PORT2[$i]}, $NODE_RANK, $EXP_BS, $EXP_LR, $task2
RUN_NAME=""$EXP_BS"_"$EXP_LR""
CUDA_VISIBLE_DEVICES=${EXP_CUDA_INDEX1[$i]} MASTER_PORT=${EXP_MASTER_PORT1[$i]} python run_glue.py with run_name=$RUN_NAME learning_rate=$EXP_LR batch_size=$EXP_BS per_gpu_batchsize=$EXP_PGB group_name=$task1 load_flag=True >/dev/null &
CUDA_VISIBLE_DEVICES=${EXP_CUDA_INDEX2[$i]} MASTER_PORT=${EXP_MASTER_PORT2[$i]} python run_glue.py with run_name=$RUN_NAME learning_rate=$EXP_LR batch_size=$EXP_BS per_gpu_batchsize=$EXP_PGB group_name=$task2 load_flag=True >/dev/null &
done
wait
date
done
|
BridgeTower/scripts/ftfpt_glue.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_glue.sh",
"repo_id": "BridgeTower",
"token_count": 1102
}
| 154 |
from ..datasets import NLVR2Dataset
from .datamodule_base import BaseDataModule
class NLVR2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return NLVR2Dataset
@property
def dataset_name(self):
return "nlvr2"
|
BridgeTower/src/datamodules/nlvr2_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/nlvr2_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 142
}
| 155 |
import torch
from torchmetrics import Metric
class Accuracy(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target):
logits, target = (
logits.detach().to(self.correct.device),
target.detach().to(self.correct.device),
)
preds = logits.argmax(dim=-1)
preds = preds[target != -100]
target = target[target != -100]
if target.numel() == 0:
return 1
assert preds.shape == target.shape
self.correct += torch.sum(preds == target)
self.total += target.numel()
def compute(self):
return self.correct / self.total
class Scalar(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("scalar", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, scalar):
if isinstance(scalar, torch.Tensor):
scalar = scalar.detach().to(self.scalar.device)
else:
scalar = torch.tensor(scalar).float().to(self.scalar.device)
self.scalar += scalar
self.total += 1
def compute(self):
return self.scalar / self.total
class VQAScore(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("score", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target):
logits, target = (
logits.detach().float().to(self.score.device),
target.detach().float().to(self.score.device),
)
logits = torch.max(logits, 1)[1]
one_hots = torch.zeros(*target.size()).to(target)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = one_hots * target
self.score += scores.sum()
self.total += len(logits)
def compute(self):
return self.score / self.total
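# --- Illustrative usage sketch (not part of the original module) -----------
def _example_metrics():  # pragma: no cover
    acc = Accuracy()
    acc.update(torch.tensor([[2.0, 0.1], [0.2, 1.5]]), torch.tensor([0, 1]))
    vqa = VQAScore()
    vqa.update(torch.tensor([[0.1, 3.0]]), torch.tensor([[0.0, 0.6]]))
    return acc.compute(), vqa.compute()  # -> (1.0, 0.6); VQAScore credits soft targets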
|
BridgeTower/src/gadgets/my_metrics.py/0
|
{
"file_path": "BridgeTower/src/gadgets/my_metrics.py",
"repo_id": "BridgeTower",
"token_count": 1078
}
| 156 |
from .utils import (
inception_normalize,
imagenet_normalize,
MinMaxResize,
)
from PIL import Image
from torchvision import transforms
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomResizedCrop, RandomHorizontalFlip
from .randaug import RandAugment
from .randaugment import RandomAugment
def pixelbert_transform(size=800):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
def pixelbert_transform_randaug(size=800):
longer = int((1333 / 800) * size)
trs = transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def imagenet_transform(size=800):
return transforms.Compose(
[
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
imagenet_normalize,
]
)
def imagenet_transform_randaug(size=800):
trs = transforms.Compose(
[
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
imagenet_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def vit_transform(size=800):
return transforms.Compose(
[
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
inception_normalize,
]
)
def vit_transform_randaug(size=800):
trs = transforms.Compose(
[
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def clip_transform(size):
return Compose([
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def clip_transform_randaug(size):
trs = Compose([
Resize(size, interpolation=transforms.InterpolationMode.BICUBIC),
CenterCrop(size),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
trs.transforms.insert(0, lambda image: image.convert('RGBA'))
trs.transforms.insert(0, RandAugment(2, 9))
trs.transforms.insert(0, lambda image: image.convert('RGB'))
return trs
def blip_transform(size):
return Compose([
Resize((size, size), interpolation=transforms.InterpolationMode.BICUBIC),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def blip_transform_randaug_pretrain(size):
return Compose([
RandomResizedCrop(size, scale=(0.2, 1.0),interpolation=transforms.InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
RandomAugment(2, 5, isPIL=True, augs=['Identity','AutoContrast','Brightness','Sharpness','Equalize',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
lambda image: Image.fromarray(image).convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def blip_transform_randaug(size):
return Compose([
RandomResizedCrop(size, scale=(0.5, 1.0),interpolation=transforms.InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
RandomAugment(2, 5, isPIL=True, augs=['Identity','AutoContrast','Brightness','Sharpness','Equalize',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
lambda image: Image.fromarray(image).convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def blip_transform_randaug_wc(size): # wc: with color changes
return Compose([
RandomResizedCrop(size, scale=(0.5, 1.0),interpolation=transforms.InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
RandAugment(2, 7),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def blip_transform_randaug_wohf(size): # remove horizontal flip for vqa
return Compose([
RandomResizedCrop(size, scale=(0.5, 1.0),interpolation=transforms.InterpolationMode.BICUBIC),
# RandomHorizontalFlip(),
RandomAugment(2, 5, isPIL=True, augs=['Identity','AutoContrast','Brightness','Sharpness','Equalize',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
lambda image: Image.fromarray(image).convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
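# --- Illustrative usage sketch (not part of the original module); the image
# path and crop size are placeholders ---
def _example_clip_transform(path="example.jpg", size=224):  # pragma: no cover
    img = Image.open(path)
    return clip_transform(size)(img)  # float tensor of shape [3, size, size]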
|
BridgeTower/src/transforms/transform.py/0
|
{
"file_path": "BridgeTower/src/transforms/transform.py",
"repo_id": "BridgeTower",
"token_count": 2767
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import gc
import json
import os
import time
import warnings
import numpy as np
import torch
import torch.nn.functional as F
import torchvision as tv
from PIL import Image, ImageFile
from detection_models import networks
from detection_util.util import *
warnings.filterwarnings("ignore", category=UserWarning)
ImageFile.LOAD_TRUNCATED_IMAGES = True
def data_transforms(img, full_size, method=Image.BICUBIC):
if full_size == "full_size":
ow, oh = img.size
h = int(round(oh / 16) * 16)
w = int(round(ow / 16) * 16)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
elif full_size == "scale_256":
ow, oh = img.size
pw, ph = ow, oh
if ow < oh:
ow = 256
oh = ph / pw * 256
else:
oh = 256
ow = pw / ph * 256
h = int(round(oh / 16) * 16)
w = int(round(ow / 16) * 16)
if (h == ph) and (w == pw):
return img
return img.resize((w, h), method)
def scale_tensor(img_tensor, default_scale=256):
_, _, w, h = img_tensor.shape
if w < h:
ow = default_scale
oh = h / w * default_scale
else:
oh = default_scale
ow = w / h * default_scale
oh = int(round(oh / 16) * 16)
ow = int(round(ow / 16) * 16)
return F.interpolate(img_tensor, [ow, oh], mode="bilinear")
def blend_mask(img, mask):
np_img = np.array(img).astype("float")
return Image.fromarray((np_img * (1 - mask) + mask * 255.0).astype("uint8")).convert("RGB")
def main(config):
print("initializing the dataloader")
model = networks.UNet(
in_channels=1,
out_channels=1,
depth=4,
conv_num=2,
wf=6,
padding=True,
batch_norm=True,
up_mode="upsample",
with_tanh=False,
sync_bn=True,
antialiasing=True,
)
## load model
checkpoint_path = os.path.join(os.path.dirname(__file__), "checkpoints/detection/FT_Epoch_latest.pt")
checkpoint = torch.load(checkpoint_path, map_location="cpu")
model.load_state_dict(checkpoint["model_state"])
print("model weights loaded")
if config.GPU >= 0:
model.to(config.GPU)
else:
model.cpu()
model.eval()
## dataloader and transformation
print("directory of testing image: " + config.test_path)
imagelist = os.listdir(config.test_path)
imagelist.sort()
total_iter = 0
P_matrix = {}
save_url = os.path.join(config.output_dir)
mkdir_if_not(save_url)
input_dir = os.path.join(save_url, "input")
output_dir = os.path.join(save_url, "mask")
# blend_output_dir=os.path.join(save_url, 'blend_output')
mkdir_if_not(input_dir)
mkdir_if_not(output_dir)
# mkdir_if_not(blend_output_dir)
idx = 0
results = []
for image_name in imagelist:
idx += 1
print("processing", image_name)
scratch_file = os.path.join(config.test_path, image_name)
if not os.path.isfile(scratch_file):
print("Skipping non-file %s" % image_name)
continue
scratch_image = Image.open(scratch_file).convert("RGB")
w, h = scratch_image.size
transformed_image_PIL = data_transforms(scratch_image, config.input_size)
scratch_image = transformed_image_PIL.convert("L")
scratch_image = tv.transforms.ToTensor()(scratch_image)
scratch_image = tv.transforms.Normalize([0.5], [0.5])(scratch_image)
scratch_image = torch.unsqueeze(scratch_image, 0)
_, _, ow, oh = scratch_image.shape
scratch_image_scale = scale_tensor(scratch_image)
if config.GPU >= 0:
scratch_image_scale = scratch_image_scale.to(config.GPU)
else:
scratch_image_scale = scratch_image_scale.cpu()
with torch.no_grad():
P = torch.sigmoid(model(scratch_image_scale))
P = P.data.cpu()
P = F.interpolate(P, [ow, oh], mode="nearest")
tv.utils.save_image(
(P >= 0.4).float(),
os.path.join(
output_dir,
image_name[:-4] + ".png",
),
nrow=1,
padding=0,
normalize=True,
)
transformed_image_PIL.save(os.path.join(input_dir, image_name[:-4] + ".png"))
gc.collect()
torch.cuda.empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('--checkpoint_name', type=str, default="FT_Epoch_latest.pt", help='Checkpoint Name')
parser.add_argument("--GPU", type=int, default=0)
parser.add_argument("--test_path", type=str, default=".")
parser.add_argument("--output_dir", type=str, default=".")
parser.add_argument("--input_size", type=str, default="scale_256", help="resize_256|full_size|scale_256")
config = parser.parse_args()
main(config)
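# Example invocation (paths are placeholders; flags as defined above):
#   python detection.py --GPU 0 --test_path ./old_photos --output_dir ./results --input_size scale_256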
|
Bringing-Old-Photos-Back-to-Life/Global/detection.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/detection.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 2295
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
# for displays
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--debug', action='store_true', help='only run one epoch and display results at each iteration')
# for training
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
# self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        self.parser.add_argument('--training_dataset',type=str,default='',help='which dataset to use for training')
# for discriminators
self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
self.parser.add_argument('--l2_feat', type=float, help='weight for feature mapping loss')
self.parser.add_argument('--use_l1_feat', action='store_true', help='use l1 for feat mapping')
self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
self.parser.add_argument('--gan_type', type=str, default='lsgan', help='Choose the loss type of GAN')
self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--norm_D',type=str, default='spectralinstance', help='instance normalization or batch normalization')
self.parser.add_argument('--init_D',type=str,default='xavier',help='normal|xavier|xavier_uniform|kaiming|orthogonal|none')
self.parser.add_argument('--no_TTUR',action='store_true',help='No TTUR')
self.parser.add_argument('--start_epoch',type=int,default=-1,help='write the start_epoch of iter.txt into this parameter')
        self.parser.add_argument('--no_degradation',action='store_true',help='when training the mapping, enable this parameter --> no degradation will be added to the clean image')
        self.parser.add_argument('--no_load_VAE',action='store_true',help='when training the mapping, enable this parameter --> randomly initialize the encoder and decoder')
self.parser.add_argument('--use_v2_degradation',action='store_true',help='enable this parameter --> 4 kinds of degradations will be used to synthesize corruption')
self.parser.add_argument('--use_vae_which_epoch',type=str,default='200')
self.parser.add_argument('--use_focal_loss',action='store_true')
        self.parser.add_argument('--mask_need_scale',action='store_true',help='enable this parameter if the pixel range of the mask is 0-255')
        self.parser.add_argument('--positive_weight',type=float,default=1.0,help='(For scratch detection) Scratch pixels are rare, so the positive class is re-weighted; this parameter scales down that weight.')
        self.parser.add_argument('--no_update_lr',action='store_true',help='if set, the learning rate is not updated during training')
self.isTrain = True
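# Example invocation sketch (the training entry point and the required base
# options are assumptions; only flags defined in this file are shown):
#   python train.py --niter 100 --niter_decay 100 --lr 0.0002 --num_D 2 --gan_type lsgan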
|
Bringing-Old-Photos-Back-to-Life/Global/options/train_options.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/options/train_options.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1766
}
| 159 |
from __future__ import annotations
from pathlib import Path
import warnings
warnings.filterwarnings("ignore")
import random
import torchaudio
import collections
import re
import numpy as np
from transformers import AutoTokenizer, logging
from .models.clap import CLAP
from .models.mapper import get_clapcap
import math
import torchaudio.transforms as T
import os
import torch
import argparse
import yaml
import sys
from huggingface_hub.file_download import hf_hub_download
logging.set_verbosity_error()
class CLAPWrapper():
"""
A class for interfacing CLAP model.
"""
model_repo = "microsoft/msclap"
model_name = {
'2022': 'CLAP_weights_2022.pth',
'2023': 'CLAP_weights_2023.pth',
'clapcap': 'clapcap_weights_2023.pth'
}
def __init__(self, model_fp: Path | str | None = None, version: str = '2023', use_cuda=False):
# Check if version is supported
self.supported_versions = self.model_name.keys()
if version not in self.supported_versions:
raise ValueError(f"The version {version} is not supported. The supported versions are {str(self.supported_versions)}")
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
self.file_path = os.path.realpath(__file__)
self.default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
self.config_as_str = (Path(__file__).parent / f"configs/config_{version}.yml").read_text()
# Automatically download model if not provided
if not model_fp:
model_fp = hf_hub_download(self.model_repo, self.model_name[version])
self.model_fp = model_fp
self.use_cuda = use_cuda
if 'clapcap' in version:
self.clapcap, self.tokenizer, self.args = self.load_clapcap()
else:
self.clap, self.tokenizer, self.args = self.load_clap()
def read_config_as_args(self,config_path,args=None,is_config_str=False):
return_dict = {}
if config_path is not None:
if is_config_str:
yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
else:
with open(config_path, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
if args != None:
for k, v in yml_config.items():
if k in args.__dict__:
args.__dict__[k] = v
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
else:
for k, v in yml_config.items():
return_dict[k] = v
args = args if args != None else return_dict
return argparse.Namespace(**args)
def load_clap(self):
r"""Load CLAP model with args from config file"""
args = self.read_config_as_args(self.config_as_str, is_config_str=True)
if 'roberta' in args.text_model or 'clip' in args.text_model or 'gpt' in args.text_model:
self.token_keys = ['input_ids', 'attention_mask']
elif 'bert' in args.text_model:
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
clap = CLAP(
audioenc_name=args.audioenc_name,
sample_rate=args.sampling_rate,
window_size=args.window_size,
hop_size=args.hop_size,
mel_bins=args.mel_bins,
fmin=args.fmin,
fmax=args.fmax,
classes_num=args.num_classes,
out_emb=args.out_emb,
text_model=args.text_model,
transformer_embed_dim=args.transformer_embed_dim,
d_proj=args.d_proj
)
# Load pretrained weights for model
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
        # We unwrap the DDP model and save it. If the model is saved without unwrapping, then it needs to be unwrapped before `load_state_dict`:
# Reference link: https://discuss.pytorch.org/t/how-to-load-dataparallel-model-which-trained-using-multiple-gpus/146005
clap.load_state_dict(model_state_dict, strict=False)
clap.eval() # set clap in eval mode
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
if 'gpt' in args.text_model:
tokenizer.add_special_tokens({'pad_token': '!'})
if self.use_cuda and torch.cuda.is_available():
clap = clap.cuda()
return clap, tokenizer, args
def load_clapcap(self):
r"""Load CLAP model with args from config file"""
args = self.read_config_as_args(self.config_as_str, is_config_str=True)
args.prefix_dim = args.d_proj
text_model = args.text_model
args.text_model = args.text_decoder
args.cross_attention = True if 'cross' in args.clapcap_model.lower() else False
if 'roberta' in args.text_model or 'clip' in args.text_model or 'gpt' in args.text_model:
self.token_keys = ['input_ids', 'attention_mask']
elif 'bert' in args.text_model:
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
clap = CLAP(
audioenc_name=args.audioenc_name,
sample_rate=args.sampling_rate,
window_size=args.window_size,
hop_size=args.hop_size,
mel_bins=args.mel_bins,
fmin=args.fmin,
fmax=args.fmax,
classes_num=args.num_classes,
out_emb=args.out_emb,
text_model=text_model,
transformer_embed_dim=args.transformer_embed_dim,
d_proj=args.d_proj
)
clapcap = get_clapcap(args.clapcap_model)(clap, args.text_decoder, args.prefix_length, args.prefix_length_clip, args.prefix_dim,
args.num_layers, args.normalize_prefix, args.mapping_type, True, True)
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
clapcap.load_state_dict(model_state_dict, strict=False)
clapcap.eval() # set clap in eval mode
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
if 'gpt' in args.text_model:
tokenizer.add_special_tokens({'pad_token': '!'})
if self.use_cuda and torch.cuda.is_available():
clapcap = clapcap.cuda()
return clapcap, tokenizer, args
def default_collate(self, batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(
self.default_collate_err_msg_format.format(elem.dtype))
return self.default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, str):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: self.default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError(
'each element in list of batch should be of equal size')
transposed = zip(*batch)
return [self.default_collate(samples) for samples in transposed]
raise TypeError(self.default_collate_err_msg_format.format(elem_type))
def read_audio(self, audio_path, resample=True):
r"""Loads audio file or array and returns a torch tensor"""
        # Load the audio clip and, if requested, resample it to the model's sampling rate
audio_time_series, sample_rate = torchaudio.load(audio_path)
resample_rate = self.args.sampling_rate
if resample and resample_rate != sample_rate:
resampler = T.Resample(sample_rate, resample_rate)
audio_time_series = resampler(audio_time_series)
return audio_time_series, resample_rate
def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
r"""Loads audio file and returns raw audio."""
# Randomly sample a segment of audio_duration from the clip or pad to match duration
audio_time_series, sample_rate = self.read_audio(audio_path, resample=resample)
audio_time_series = audio_time_series.reshape(-1)
# audio_time_series is shorter than predefined audio duration,
# so audio_time_series is extended
if audio_duration*sample_rate >= audio_time_series.shape[0]:
repeat_factor = int(np.ceil((audio_duration*sample_rate) /
audio_time_series.shape[0]))
# Repeat audio_time_series by repeat_factor to match audio_duration
audio_time_series = audio_time_series.repeat(repeat_factor)
# remove excess part of audio_time_series
audio_time_series = audio_time_series[0:audio_duration*sample_rate]
else:
# audio_time_series is longer than predefined audio duration,
# so audio_time_series is trimmed
start_index = random.randrange(
audio_time_series.shape[0] - audio_duration*sample_rate)
audio_time_series = audio_time_series[start_index:start_index +
audio_duration*sample_rate]
return torch.FloatTensor(audio_time_series)
def preprocess_audio(self, audio_files, resample):
r"""Load list of audio files and return raw audio"""
audio_tensors = []
for audio_file in audio_files:
audio_tensor = self.load_audio_into_tensor(
audio_file, self.args.duration, resample)
audio_tensor = audio_tensor.reshape(
1, -1).cuda() if self.use_cuda and torch.cuda.is_available() else audio_tensor.reshape(1, -1)
audio_tensors.append(audio_tensor)
return self.default_collate(audio_tensors)
def preprocess_text(self, text_queries):
r"""Load list of class labels and return tokenized text"""
tokenized_texts = []
for ttext in text_queries:
if 'gpt' in self.args.text_model:
ttext = ttext + ' <|endoftext|>'
tok = self.tokenizer.encode_plus(
text=ttext, add_special_tokens=True, max_length=self.args.text_len, padding='max_length', return_tensors="pt")
for key in self.token_keys:
tok[key] = tok[key].reshape(-1).cuda() if self.use_cuda and torch.cuda.is_available() else tok[key].reshape(-1)
tokenized_texts.append(tok)
return self.default_collate(tokenized_texts)
def get_text_embeddings(self, class_labels):
r"""Load list of class labels and return text embeddings"""
preprocessed_text = self.preprocess_text(class_labels)
return self._get_text_embeddings(preprocessed_text)
def get_audio_embeddings(self, audio_files, resample=True):
r"""Load list of audio files and return a audio embeddings"""
preprocessed_audio = self.preprocess_audio(audio_files, resample)
return self._get_audio_embeddings(preprocessed_audio)
def _get_text_embeddings(self, preprocessed_text):
r"""Load preprocessed text and return text embeddings"""
with torch.no_grad():
return self.clap.caption_encoder(preprocessed_text)
def _get_audio_embeddings(self, preprocessed_audio):
r"""Load preprocessed audio and return a audio embeddings"""
with torch.no_grad():
preprocessed_audio = preprocessed_audio.reshape(
preprocessed_audio.shape[0], preprocessed_audio.shape[2])
            # Index [0] is the audio embedding; [1] holds the output class probabilities
return self.clap.audio_encoder(preprocessed_audio)[0]
def _generic_batch_inference(self, func, *args):
r"""Process audio and/or text per batch"""
input_tmp = args[0]
batch_size = args[-1]
# args[0] has audio_files, args[1] has class_labels
inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
args0_len = len(args[0])
# compute text_embeddings once for all the audio_files batches
if len(inputs) == 2:
text_embeddings = self.get_text_embeddings(args[1])
inputs = [args[0], args[1], text_embeddings]
dataset_idx = 0
for _ in range(math.ceil(args0_len/batch_size)):
next_batch_idx = dataset_idx + batch_size
# batch size is bigger than available audio/text items
if next_batch_idx >= args0_len:
inputs[0] = input_tmp[dataset_idx:]
yield func(*tuple(inputs))
else:
inputs[0] = input_tmp[dataset_idx:next_batch_idx]
yield func(*tuple(inputs))
dataset_idx = next_batch_idx
def get_audio_embeddings_per_batch(self, audio_files, batch_size):
r"""Load preprocessed audio and return a audio embeddings per batch"""
return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
def get_text_embeddings_per_batch(self, class_labels, batch_size):
r"""Load preprocessed text and return text embeddings per batch"""
return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
def compute_similarity(self, audio_embeddings, text_embeddings):
r"""Compute similarity between text and audio embeddings"""
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
logit_scale = self.clap.logit_scale.exp()
similarity = logit_scale*text_embeddings @ audio_embeddings.T
return similarity.T
def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
r"""Compute classification probabilities for each audio recording in a batch and each class label"""
return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
def generate_caption(self, audio_files, resample=True, beam_size: int = 5, entry_length=67, temperature=1.):
r"""Generate audio captions for each audio recording in a batch"""
captions = []
audio_tensors = self.preprocess_audio(audio_files, resample)
with torch.no_grad():
prefix = self.clapcap.clap(audio_tensors.squeeze(1))[0]
if self.args.normalize_prefix:
prefix = prefix / prefix.norm(2, -1).reshape(-1,1)
prefix_embed = self.clapcap.clap_project(prefix).view(-1, self.args.prefix_length, self.clapcap.gpt.transformer.wte.weight.shape[1])
for i in range(len(audio_tensors)):
gen_caption = self._generate_beam(embed=prefix_embed[i].unsqueeze(0),\
beam_size=beam_size,\
entry_length=entry_length,\
temperature=temperature)[0]
captions.append(gen_caption.capitalize())
return captions
def _generate_beam(self, beam_size: int = 5, prompt=None, embed=None,
entry_length=67, temperature=1., stop_token: str = ' <|endoftext|>'):
r"""Generate captions by beam search decoding"""
self.clapcap.eval()
stop_token_index = self.tokenizer.encode(stop_token)[0]
tokens = None
scores = None
device = next(self.clapcap.parameters()).device
seq_lengths = torch.ones(beam_size, device=device)
is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
with torch.no_grad():
if embed is not None:
generated = embed
else:
if tokens is None:
tokens = torch.tensor(self.tokenizer.encode(prompt))
tokens = tokens.unsqueeze(0).to(device)
generated = self.clapcap.gpt.transformer.wte(tokens)
for i in range(entry_length):
outputs = self.clapcap.gpt(inputs_embeds=generated)
logits = outputs.logits
logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
logits = logits.softmax(-1).log()
if scores is None:
scores, next_tokens = logits.topk(beam_size, -1)
generated = generated.expand(beam_size, *generated.shape[1:])
next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
if tokens is None:
tokens = next_tokens
else:
tokens = tokens.expand(beam_size, *tokens.shape[1:])
tokens = torch.cat((tokens, next_tokens), dim=1)
else:
logits[is_stopped] = -float(np.inf)
logits[is_stopped, 0] = 0
scores_sum = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
scores_sum_average = scores_sum / seq_lengths[:, None]
scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
next_tokens_source = next_tokens // scores_sum.shape[1]
seq_lengths = seq_lengths[next_tokens_source]
next_tokens = next_tokens % scores_sum.shape[1]
next_tokens = next_tokens.unsqueeze(1)
tokens = tokens[next_tokens_source]
tokens = torch.cat((tokens, next_tokens), dim=1)
generated = generated[next_tokens_source]
scores = scores_sum_average * seq_lengths
is_stopped = is_stopped[next_tokens_source]
next_token_embed = self.clapcap.gpt.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
generated = torch.cat((generated, next_token_embed), dim=1)
is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
if is_stopped.all():
break
scores = scores / seq_lengths
output_list = tokens.cpu().numpy()
output_texts = [self.tokenizer.decode(output[:int(length)]) for output, length in zip(output_list, seq_lengths)]
order = scores.argsort(descending=True)
output_texts = [output_texts[i] for i in order]
return output_texts
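# --- Illustrative usage sketch (not part of the original module); the audio
# file names and text labels are placeholders ---
def _example_clap_usage():  # pragma: no cover
    clap = CLAPWrapper(version='2023', use_cuda=False)
    text_emb = clap.get_text_embeddings(["a dog barking", "rain falling"])
    audio_emb = clap.get_audio_embeddings(["dog.wav"])
    similarity = clap.compute_similarity(audio_emb, text_emb)  # [n_audio, n_text]
    captioner = CLAPWrapper(version='clapcap', use_cuda=False)
    captions = captioner.generate_caption(["dog.wav"], beam_size=5)
    return similarity, captions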
|
CLAP/msclap/CLAPWrapper.py/0
|
{
"file_path": "CLAP/msclap/CLAPWrapper.py",
"repo_id": "CLAP",
"token_count": 9467
}
| 160 |
.. role:: hidden
:class: hidden-section
.. _optimizers:
Optimizers
==========
Optimizers update the Model parameters based on the gradients.
.. automodule:: fairseq.optim
:members:
.. autoclass:: fairseq.optim.FairseqOptimizer
:members:
:undoc-members:
.. autoclass:: fairseq.optim.adadelta.Adadelta
:members:
:undoc-members:
.. autoclass:: fairseq.optim.adagrad.Adagrad
:members:
:undoc-members:
.. autoclass:: fairseq.optim.adafactor.FairseqAdafactor
:members:
:undoc-members:
.. autoclass:: fairseq.optim.adam.FairseqAdam
:members:
:undoc-members:
.. autoclass:: fairseq.optim.fp16_optimizer.FP16Optimizer
:members:
:undoc-members:
.. autoclass:: fairseq.optim.nag.FairseqNAG
:members:
:undoc-members:
.. autoclass:: fairseq.optim.sgd.SGD
:members:
:undoc-members:
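Most training loops do not instantiate these classes directly. A minimal usage
sketch (assuming the ``build_optimizer`` helper exposed by :mod:`fairseq.optim`;
``args``, ``model`` and ``loss`` are placeholders)::

    from fairseq import optim

    optimizer = optim.build_optimizer(args, list(model.parameters()))
    optimizer.zero_grad()
    optimizer.backward(loss)
    optimizer.step()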
|
COCO-LM/fairseq/docs/optim.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/optim.rst",
"repo_id": "COCO-LM",
"token_count": 346
}
| 161 |
#!/usr/bin/python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import hashlib
import sys
from multiprocessing import Pool
def get_hashes_and_lines(raw_line):
hash = hashlib.md5(raw_line).hexdigest()
return hash, raw_line
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--workers", type=int, default=10)
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args()
seen = set()
with fileinput.input(args.files, mode="rb") as h:
pool = Pool(args.workers)
results = pool.imap_unordered(get_hashes_and_lines, h, 1000)
for i, (hash, raw_line) in enumerate(results):
if hash not in seen:
seen.add(hash)
sys.stdout.buffer.write(raw_line)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
if __name__ == "__main__":
main()
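# Example invocation (file names are placeholders; unique lines go to stdout):
#   python deduplicate_lines.py --workers 10 corpus.part1 corpus.part2 > corpus.dedup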
|
COCO-LM/fairseq/examples/backtranslation/deduplicate_lines.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/backtranslation/deduplicate_lines.py",
"repo_id": "COCO-LM",
"token_count": 511
}
| 162 |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from sacremoses.normalize import MosesPunctNormalizer
def main(args):
normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
for line in sys.stdin:
print(normalizer.normalize(line.rstrip()), flush=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", "-l", default="en")
parser.add_argument("--penn", "-p", action="store_true")
args = parser.parse_args()
main(args)
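# Example invocation (reads raw text from stdin, writes normalized text to stdout):
#   cat raw.en | python normalize.py --lang en > normalized.en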
|
COCO-LM/fairseq/examples/constrained_decoding/normalize.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/constrained_decoding/normalize.py",
"repo_id": "COCO-LM",
"token_count": 241
}
| 163 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from .noisy_channel_beam_search import NoisyChannelBeamSearch
from fairseq.sequence_generator import EnsembleModel
class NoisyChannelSequenceGenerator(object):
def __init__(
self,
combine_method,
tgt_dict,
src_dict=None,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
len_penalty=1.0,
unk_penalty=0.0,
retain_dropout=False,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
normalize_scores=True,
channel_models=None,
k2=10,
ch_weight=1.0,
channel_scoring_type='log_norm',
top_k_vocab=0,
lm_models=None,
lm_dict=None,
lm_weight=1.0,
normalize_lm_scores_by_tgt_len=False,
):
"""Generates translations of a given source sentence,
using beam search with noisy channel decoding.
Args:
combine_method (string, optional): Method to combine direct, LM and
channel model scores (default: None)
tgt_dict (~fairseq.data.Dictionary): target dictionary
src_dict (~fairseq.data.Dictionary): source dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
no_repeat_ngram_size (int, optional): Size of n-grams that we avoid
repeating in the generation (default: 0)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
channel_models (List[~fairseq.models.FairseqModel]): ensemble of models
translating from the target to the source
k2 (int, optional): Top K2 candidates to score per beam at each step (default:10)
ch_weight (int, optional): Weight associated with the channel model score
assuming that the direct model score has weight 1.0 (default: 1.0)
channel_scoring_type (str, optional): String specifying how to score
the channel model (default: 'log_norm')
top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or
`'src_vocab_batched'`, then this parameter specifies the number of
most frequent tokens to include in the channel model output vocabulary,
in addition to the source tokens in the input batch (default: 0)
lm_models (List[~fairseq.models.FairseqModel]): ensemble of models
generating text in the target language
lm_dict (~fairseq.data.Dictionary): LM Model dictionary
lm_weight (int, optional): Weight associated with the LM model score
assuming that the direct model score has weight 1.0 (default: 1.0)
normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores
by the target length? By default, we normalize the combination of
LM and channel model scores by the source length
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
self.channel_models = channel_models
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.combine_method = combine_method
self.k2 = k2
self.ch_weight = ch_weight
self.channel_scoring_type = channel_scoring_type
self.top_k_vocab = top_k_vocab
self.lm_models = lm_models
self.lm_dict = lm_dict
self.lm_weight = lm_weight
self.log_softmax_fn = torch.nn.LogSoftmax(dim=1)
self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len
self.share_tgt_dict = (self.lm_dict == self.tgt_dict)
self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict)
self.ch_scoring_bsz = 3072
assert temperature > 0, '--temperature must be greater than 0'
self.search = NoisyChannelBeamSearch(tgt_dict)
@torch.no_grad()
def generate(
self,
models,
sample,
prefix_tokens=None,
bos_token=None,
**kwargs
):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
"""
model = EnsembleModel(models)
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(model.models_size)
],
)
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths_no_eos.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
src_lengths = encoder_input['src_lengths']
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
# reorder source tokens so they may be used as a reference in generating P(S|T)
src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index)
src_tokens = src_tokens.repeat(1, beam_size).view(-1, src_len)
src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1)
attn, attn_buf = None, None
nonpad_idxs = None
# The cands_to_ignore indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
fw scores for each hypothesis
combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing
combined noisy channel scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths_no_eos[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i][nonpad_idxs[sent]]
_, alignment = hypo_attn.max(dim=0)
else:
hypo_attn = None
alignment = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': alignment,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k):
"""Rescore the top k hypothesis from each beam using noisy channel modeling
Returns:
new_fw_lprobs: the direct model probabilities after pruning the top k
new_ch_lm_lprobs: the combined channel and language model probabilities
new_lm_lprobs: the language model probabilities after pruning the top k
"""
with torch.no_grad():
lprobs_size = lprobs.size()
if prefix_tokens is not None and step < prefix_tokens.size(1):
probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1).data
).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1)
cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1)
# need to calculate and save fw and lm probs for prefix tokens
fw_top_k = cand_scores
fw_top_k_idx = cand_indices
k = 1
else:
# take the top k best words for every sentence in batch*beam
fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k)
eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0]
ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0)
src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype)
if self.combine_method != "lm_only":
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index
cur_tgt_size = step+2
# add eos to all candidate sentences except those that already end in eos
eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1)
eos_tokens[eos_idx] = self.tgt_dict.pad_index
if step == 0:
channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1)
ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size))
ch_input_lengths[eos_idx] = cur_tgt_size-1
if self.channel_scoring_type == "unnormalized":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:])
ch_intermed_scores = ch_intermed_scores.float()
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "k2_separate":
for k_idx in range(k):
k_eos_tokens = eos_tokens[k_idx::k, :]
if step == 0:
k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
k_ch_input_lengths = ch_input_lengths[k_idx::k]
k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens)
k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True)
k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2)
k_ch_intermed_scores *= not_padding.float()
ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "src_vocab":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_lprobs = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab)
ch_scores = torch.sum(ch_lprobs, dim=1)
elif self.channel_scoring_type == "src_vocab_batched":
ch_bsz_size = temp_src_tokens_full.shape[0]
ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz))
for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)):
end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size)
temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :]
channel_input_batch = channel_input[start_idx:end_idx, :]
ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx]
ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch)
ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True)
ch_lprobs_list[i] = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output_batch, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab,
start_idx=start_idx, end_idx=end_idx)
ch_lprobs = torch.cat(ch_lprobs_list, dim=0)
ch_scores = torch.sum(ch_lprobs, dim=1)
else:
ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full)
ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True)
ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1)
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
else:
cur_tgt_size = 0
ch_scores = ch_scores.view(bsz*beam_size, k)
expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten()
if self.share_tgt_dict:
lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k)
else:
new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm)
new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm)
lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k)
lm_scores.add_(expanded_lm_prefix_scores)
ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size)
# initialize all as min value
new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_fw_lprobs[:, self.pad] = -math.inf
new_ch_lm_lprobs[:, self.pad] = -math.inf
new_lm_lprobs[:, self.pad] = -math.inf
new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k)
new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores)
new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k))
return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs
def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size):
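            """Combine channel model and language model scores according to
            `combine_type`, applying self.ch_weight / self.lm_weight and
            optionally normalizing by source and target lengths."""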
if self.channel_scoring_type == "unnormalized":
ch_scores = self.log_softmax_fn(
ch_scores.view(-1, self.beam_size * self.k2)
).view(ch_scores.shape)
ch_scores = ch_scores * self.ch_weight
lm_scores1 = lm_scores1 * self.lm_weight
if combine_type == "lm_only":
# log P(T|S) + log P(T)
ch_scores = lm_scores1.view(ch_scores.size())
elif combine_type == "noisy_channel":
# 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T)
if self.normalize_lm_scores_by_tgt_len:
ch_scores.div_(src_size)
lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size)
ch_scores.add_(lm_scores_norm)
# 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T)
else:
ch_scores.add_(lm_scores1.view(ch_scores.size()))
ch_scores.div_(src_size)
return ch_scores
if self.channel_models is not None:
channel_model = self.channel_models[0] # assume only one channel_model model
else:
channel_model = None
lm = EnsembleModel(self.lm_models)
lm_incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(lm.models_size)
],
)
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lm.reorder_incremental_state(lm_incremental_states, reorder_state)
fw_lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature,
)
fw_lprobs[:, self.pad] = -math.inf # never select pad
fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2)
# handle min and max length constraints
if step >= max_len:
fw_lprobs[:, :self.eos] = -math.inf
fw_lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
fw_lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1):
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_mask = prefix_toks.ne(self.pad)
prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
fw_lprobs[prefix_mask] = -math.inf
fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs
)
prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
ch_lm_lprobs[prefix_mask] = -math.inf
ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs
)
prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
lm_lprobs[prefix_mask] = -math.inf
lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim)
ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim)
lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(fw_lprobs)
scores_buf = scores_buf.type_as(fw_lprobs)
self.search.set_src_lengths(src_lengths_no_eos)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step(
step,
fw_lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size),
lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for candidates to be ignored)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size] &= ~cands_to_ignore
# only consider eos when it's among the top beam_size indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size]
)
combined_noisy_channel_eos_scores = torch.masked_select(
combined_noisy_channel_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
)
# finalize hypo using channel model score
finalized_sents = finalize_hypos(
step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = torch.nonzero(batch_mask).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs]
fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths_no_eos = src_lengths_no_eos[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze()
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# ignored hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
eos_mask[:, :beam_size] |= cands_to_ignore
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_cands_to_ignore, active_hypos)
)
# update cands_to_ignore to ignore any finalized hypos
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
assert (~cands_to_ignore).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
torch.gather(
lm_lprobs_top_k, dim=1, index=active_hypos,
out=lm_prefix_scores.view(bsz, beam_size)
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k):
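    """Run one language model decoder step on `input_tokens` and gather the
    log-probabilities of the k candidate continuations in `cand_tokens` for
    each hypothesis."""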
with torch.no_grad():
lm_lprobs, avg_attn_scores = model.forward_decoder(
input_tokens, encoder_outs=None, incremental_states=incremental_states,
)
lm_lprobs_size = lm_lprobs.size(0)
probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1)
return probs_next_wrd
def make_dict2dict(old_dict, new_dict):
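    """Build a map from token indices in `old_dict` to the indices of the
    same symbols in `new_dict`."""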
dict2dict_map = {}
for sym in old_dict.symbols:
dict2dict_map[old_dict.index(sym)] = new_dict.index(sym)
return dict2dict_map
def dict2dict(tokens, dict2dict_map):
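    """Remap a tensor of token indices through `dict2dict_map`, performing
    the remapping on CPU and returning the result on the original device."""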
if tokens.device == torch.device('cpu'):
tokens_tmp = tokens
else:
tokens_tmp = tokens.cpu()
return tokens_tmp.map_(
tokens_tmp,
lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)]
).to(tokens.device)
def reorder_tokens(tokens, lengths, eos):
# reorder source tokens so they may be used as reference for P(S|T)
return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0)
def reorder_all_tokens(tokens, lengths, eos):
# used to reorder src tokens from [<pad> <w1> <w2> .. <eos>] to [<eos> <w1> <w2>...<pad>]
# so source tokens can be used to predict P(S|T)
return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)])
def normalized_scores_with_batch_vocab(
model_decoder, features, target_ids, k, bsz, beam_size,
pad_idx, top_k=0, vocab_size_meter=None, start_idx=None,
end_idx=None, **kwargs):
"""
Get normalized probabilities (or log probs) from a net's output
w.r.t. vocab consisting of target IDs in the batch
"""
if model_decoder.adaptive_softmax is None:
weight = model_decoder.output_projection.weight
vocab_ids = torch.unique(
torch.cat(
(torch.unique(target_ids), torch.arange(top_k, device=target_ids.device))
)
)
id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids))))
mapped_target_ids = target_ids.cpu().apply_(
lambda x, id_map=id_map: id_map[x]
).to(target_ids.device)
expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
if start_idx is not None and end_idx is not None:
expanded_target_ids = expanded_target_ids[start_idx:end_idx, :]
logits = F.linear(features, weight[vocab_ids, :])
log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32)
intermed_scores = torch.gather(
log_softmax[:, :-1, :],
2,
expanded_target_ids[:, 1:].unsqueeze(2),
).squeeze()
not_padding = expanded_target_ids[:, 1:] != pad_idx
intermed_scores *= not_padding.float()
return intermed_scores
else:
raise ValueError("adaptive softmax doesn't work with " +
"`normalized_scores_with_batch_vocab()`")
|
COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py",
"repo_id": "COCO-LM",
"token_count": 21564
}
| 164 |
# Deep Transformers with Latent Depth (Li et al., 2020)
[https://arxiv.org/abs/2009.13102](https://arxiv.org/abs/2009.13102).
## Introduction
We present a probabilistic framework to automatically learn which layer(s) to use by learning the posterior distributions of layer selection. As an extension of this framework, we propose a novel method to train one shared Transformer network for multilingual machine translation with different layer selection posteriors for each language pair.
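The core idea can be illustrated with a minimal, self-contained sketch (this is **not** the repository implementation; the class name `LatentDepthBlock`, the single scalar logit, and the plain sigmoid gate are simplifying assumptions for illustration only — the actual code learns per-language-pair selection posteriors with annealing and a sparsity penalty, as reflected in the training flags below):
```python
import torch
import torch.nn as nn

class LatentDepthBlock(nn.Module):
    """Wrap a layer with a learned selection gate (conceptual sketch only)."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer
        # posterior parameter for "use this layer"
        self.select_logit = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        z = torch.sigmoid(self.select_logit)     # soft layer-selection probability
        return z * self.layer(x) + (1 - z) * x   # as z -> 0 the layer is effectively skipped
```
Roughly speaking, training pushes the per-layer selection probabilities towards a target number of active layers (see `--target-layers` and `--sparsity-weight` below), and each language pair keeps its own set of selection parameters while sharing the underlying Transformer weights.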
## Training a multilingual model with latent depth
Below is an example of training with latent depth in the decoder for one-to-many (O2M) related languages. We use the same preprocessed (numberized and binarized) TED8 dataset as in [Balancing Training for Multilingual Neural Machine Translation (Wang et al., 2020)](https://github.com/cindyxinyiwang/multiDDS), which can be generated with [the script](https://github.com/cindyxinyiwang/multiDDS/blob/multiDDS/util_scripts/prepare_multilingual_data.sh) provided by the authors.
```bash
lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur"
databin_dir=<path to binarized data>
fairseq-train ${databin_dir} \
--user-dir examples/latent_depth/latent_depth_src \
--lang-pairs "${lang_pairs_str}" \
--arch multilingual_transformer_iwslt_de_en \
--task multilingual_translation_latent_depth \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--share-encoders \
--share-decoders \
--decoder-langtok \
--share-decoder-input-output-embed \
--dropout 0.3 --attention-dropout 0.3 \
--optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
--lr-scheduler inverse_sqrt --stop-min-lr 1e-9 --warmup-init-lr 1e-7 --warmup-updates 8000 \
--max-tokens 4096 --update-freq 1 \
--lr 0.0015 \
--clip-norm 1.0 \
--seed 2 \
--ddp-backend=legacy_ddp \
--encoder-layers 12 \
--decoder-layers 24 \
--decoder-latent-layer \
--sparsity-weight 0.1 \
--anneal-updates 5000 \
--soft-update 500 \
--target-layers 12 \
--share-weight 0.1
```
## Inference command
```bash
lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur"
databin_dir=<path to binarized data>
model_path=<path to checkpoint>
src_lang=<source language to translate from>
tgt_lang=<target language to translate to>
gen_data=<name of data split, e.g. valid, test, etc>
fairseq-generate ${databin_dir} \
--path ${model_path} \
--task multilingual_translation_latent_depth \
--decoder-latent-layer \
--lang-pairs "${lang_pairs_str}" \
-s ${src_lang} -t ${tgt_lang} \
--gen-subset $gen_data \
--scoring sacrebleu \
--remove-bpe 'sentencepiece' \
--lenpen 1.0 \
--beam 5 \
--decoder-langtok \
--max-tokens 4096
```
## Citation
```bibtex
@article{li2020deep,
title={Deep Transformers with Latent Depth},
author={Li, Xian and Stickland, Asa Cooper and Tang, Yuqing and Kong, Xiang},
journal={arXiv preprint arXiv:2009.13102},
year={2020}
}
```
|
COCO-LM/fairseq/examples/latent_depth/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/latent_depth/README.md",
"repo_id": "COCO-LM",
"token_count": 1061
}
| 165 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder
from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer
class LinformerTransformerEncoder(TransformerEncoder):
"""
    Implementation of a bidirectional, Linformer-based sentence encoder used
    in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
LinformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(self, args, dictionary, embed_tokens):
self.compress_layer = None
super().__init__(args, dictionary, embed_tokens)
def build_encoder_layer(self, args):
if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
compress_layer = nn.Linear(
self.args.max_positions,
self.args.max_positions // self.args.compressed,
)
            # initialize parameters for the compressed projection layer
nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
if self.args.freeze_compress == 1:
compress_layer.weight.requires_grad = False
self.compress_layer = compress_layer
return LinformerTransformerEncoderLayer(args, self.compress_layer)
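# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original fairseq file): the shared
# `compress_layer` built above projects the sequence dimension of keys and
# values, so attention cost depends on a fixed compressed length rather than
# the full sequence length. The standalone example below shows the idea with
# plain torch; the tensor sizes and names (seq_len, compressed, dim) are
# assumptions chosen only for demonstration and do not come from the fairseq
# implementation.
if __name__ == "__main__":
    import torch

    seq_len, compressed, dim = 128, 4, 16
    compress = nn.Linear(seq_len, seq_len // compressed)  # same shape as compress_layer above

    x = torch.randn(2, seq_len, dim)  # B x T x C token features
    # project the sequence dimension of keys/values: B x C x T -> B x C x (T // compressed)
    kv = compress(x.transpose(1, 2)).transpose(1, 2)
    # queries stay full length; the attention matrix is now T x (T // compressed)
    attn = torch.softmax(x @ kv.transpose(1, 2) / dim ** 0.5, dim=-1)
    out = attn @ kv  # B x T x C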
|
COCO-LM/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py",
"repo_id": "COCO-LM",
"token_count": 768
}
| 166 |